[{"data":1,"prerenderedAt":7311},["ShallowReactive",2],{"/en-us/blog/tags/inside-gitlab/":3,"navigation-en-us":20,"banner-en-us":433,"footer-en-us":446,"inside GitLab-tag-page-en-us":658},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/inside-gitlab","tags",false,"",{"tag":9,"tagSlug":10},"inside GitLab","inside-gitlab",{"template":12},"BlogTag","content:en-us:blog:tags:inside-gitlab.yml","yaml","Inside Gitlab","content","en-us/blog/tags/inside-gitlab.yml","en-us/blog/tags/inside-gitlab","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":429,"_type":14,"title":430,"_source":16,"_file":431,"_stem":432,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":420},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab 
Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising 
security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all 
resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab 
Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust 
center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417},{"text":408,"config":409},"Start free 
trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"freeTrial":421,"mobileIcon":425,"desktopIcon":427},{"text":422,"config":423},"Learn more about GitLab Duo",{"href":62,"dataGaName":424,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":426},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":428},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":434,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":435,"titleMobile":435,"button":436,"config":441,"_id":443,"_type":14,"_source":16,"_file":444,"_stem":445,"_extension":19},"/shared/en-us/banner","GitLab 18 & the next step in intelligent DevSecOps. 
Join us June 24.",{"text":437,"config":438},"Register now",{"href":439,"dataGaName":440,"dataGaLocation":28},"/eighteen/","gitlab 18 banner",{"layout":442},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":447,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":448,"_id":654,"_type":14,"title":655,"_source":16,"_file":656,"_stem":657,"_extension":19},"/shared/en-us/main-footer",{"text":449,"source":450,"edit":456,"contribute":461,"config":466,"items":471,"minimal":646},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":451,"config":452},"View page source",{"href":453,"dataGaName":454,"dataGaLocation":455},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":457,"config":458},"Edit this page",{"href":459,"dataGaName":460,"dataGaLocation":455},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":462,"config":463},"Please contribute",{"href":464,"dataGaName":465,"dataGaLocation":455},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":467,"facebook":468,"youtube":469,"linkedin":470},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[472,495,552,581,616],{"title":46,"links":473,"subMenu":478},[474],{"text":475,"config":476},"DevSecOps platform",{"href":55,"dataGaName":477,"dataGaLocation":455},"devsecops platform",[479],{"title":186,"links":480},[481,485,490],{"text":482,"config":483},"View plans",{"href":188,"dataGaName":484,"dataGaLocation":455},"view plans",{"text":486,"config":487},"Why Premium?",{"href":488,"dataGaName":489,"dataGaLocation":455},"/pricing/premium/","why premium",{"text":491,"config":492},"Why 
Ultimate?",{"href":493,"dataGaName":494,"dataGaLocation":455},"/pricing/ultimate/","why ultimate",{"title":496,"links":497},"Solutions",[498,503,506,508,513,518,522,525,529,534,536,539,542,547],{"text":499,"config":500},"Digital transformation",{"href":501,"dataGaName":502,"dataGaLocation":455},"/solutions/digital-transformation/","digital transformation",{"text":134,"config":504},{"href":129,"dataGaName":505,"dataGaLocation":455},"security & compliance",{"text":123,"config":507},{"href":105,"dataGaName":106,"dataGaLocation":455},{"text":509,"config":510},"Agile development",{"href":511,"dataGaName":512,"dataGaLocation":455},"/solutions/agile-delivery/","agile delivery",{"text":514,"config":515},"Cloud transformation",{"href":516,"dataGaName":517,"dataGaLocation":455},"/solutions/cloud-native/","cloud transformation",{"text":519,"config":520},"SCM",{"href":119,"dataGaName":521,"dataGaLocation":455},"source code management",{"text":109,"config":523},{"href":111,"dataGaName":524,"dataGaLocation":455},"continuous integration & delivery",{"text":526,"config":527},"Value stream management",{"href":161,"dataGaName":528,"dataGaLocation":455},"value stream management",{"text":530,"config":531},"GitOps",{"href":532,"dataGaName":533,"dataGaLocation":455},"/solutions/gitops/","gitops",{"text":171,"config":535},{"href":173,"dataGaName":174,"dataGaLocation":455},{"text":537,"config":538},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":455},{"text":540,"config":541},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":455},{"text":543,"config":544},"Education",{"href":545,"dataGaName":546,"dataGaLocation":455},"/solutions/education/","education",{"text":548,"config":549},"Financial services",{"href":550,"dataGaName":551,"dataGaLocation":455},"/solutions/finance/","financial 
services",{"title":191,"links":553},[554,556,558,560,563,565,567,569,571,573,575,577,579],{"text":203,"config":555},{"href":205,"dataGaName":206,"dataGaLocation":455},{"text":208,"config":557},{"href":210,"dataGaName":211,"dataGaLocation":455},{"text":213,"config":559},{"href":215,"dataGaName":216,"dataGaLocation":455},{"text":218,"config":561},{"href":220,"dataGaName":562,"dataGaLocation":455},"docs",{"text":241,"config":564},{"href":243,"dataGaName":244,"dataGaLocation":455},{"text":236,"config":566},{"href":238,"dataGaName":239,"dataGaLocation":455},{"text":246,"config":568},{"href":248,"dataGaName":249,"dataGaLocation":455},{"text":259,"config":570},{"href":261,"dataGaName":262,"dataGaLocation":455},{"text":251,"config":572},{"href":253,"dataGaName":254,"dataGaLocation":455},{"text":264,"config":574},{"href":266,"dataGaName":267,"dataGaLocation":455},{"text":269,"config":576},{"href":271,"dataGaName":272,"dataGaLocation":455},{"text":274,"config":578},{"href":276,"dataGaName":277,"dataGaLocation":455},{"text":279,"config":580},{"href":281,"dataGaName":282,"dataGaLocation":455},{"title":297,"links":582},[583,585,587,589,591,593,595,600,605,607,609,611],{"text":304,"config":584},{"href":306,"dataGaName":299,"dataGaLocation":455},{"text":309,"config":586},{"href":311,"dataGaName":312,"dataGaLocation":455},{"text":317,"config":588},{"href":319,"dataGaName":320,"dataGaLocation":455},{"text":322,"config":590},{"href":324,"dataGaName":325,"dataGaLocation":455},{"text":327,"config":592},{"href":329,"dataGaName":330,"dataGaLocation":455},{"text":332,"config":594},{"href":334,"dataGaName":335,"dataGaLocation":455},{"text":596,"config":597},"Environmental, social and governance (ESG)",{"href":598,"dataGaName":599,"dataGaLocation":455},"/environmental-social-governance/","environmental, social and governance",{"text":601,"config":602},"Diversity, inclusion and belonging (DIB)",{"href":603,"dataGaName":604,"dataGaLocation":455},"/diversity-inclusion-belonging/","Diversity, 
inclusion and belonging",{"text":337,"config":606},{"href":339,"dataGaName":340,"dataGaLocation":455},{"text":347,"config":608},{"href":349,"dataGaName":350,"dataGaLocation":455},{"text":352,"config":610},{"href":354,"dataGaName":355,"dataGaLocation":455},{"text":612,"config":613},"Modern Slavery Transparency Statement",{"href":614,"dataGaName":615,"dataGaLocation":455},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":617,"links":618},"Contact Us",[619,622,624,626,631,636,641],{"text":620,"config":621},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":455},{"text":366,"config":623},{"href":368,"dataGaName":369,"dataGaLocation":455},{"text":371,"config":625},{"href":373,"dataGaName":374,"dataGaLocation":455},{"text":627,"config":628},"Status",{"href":629,"dataGaName":630,"dataGaLocation":455},"https://status.gitlab.com/","status",{"text":632,"config":633},"Terms of use",{"href":634,"dataGaName":635,"dataGaLocation":455},"/terms/","terms of use",{"text":637,"config":638},"Privacy statement",{"href":639,"dataGaName":640,"dataGaLocation":455},"/privacy/","privacy statement",{"text":642,"config":643},"Cookie preferences",{"dataGaName":644,"dataGaLocation":455,"id":645,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":647},[648,650,652],{"text":632,"config":649},{"href":634,"dataGaName":635,"dataGaLocation":455},{"text":637,"config":651},{"href":639,"dataGaName":640,"dataGaLocation":455},{"text":642,"config":653},{"dataGaName":644,"dataGaLocation":455,"id":645,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":659,"featuredPost":7291,"totalPagesCount":7309,"initialPosts":7310},[660,685,707,730,752,774,795,818,839,859,880,900,920,940,960,979,1000,1020,1038,1056,1076,1098,1118,1138,1159,1179,1199,1219,1238,1257,1277,1306,1327,1349,1369,1388,1407,1427,1447,1467,1486,1504,1525,1544,1564,1585,1604,1624,1644,1664,1684,1705,1725,1745,1766,1786,1806,1826,1847,1866,1887,1907,1927,1946,1965,1985,2005,2025,2044,2064,2086,2108,2127,2147,2166,2186,2205,2224,2243,2263,2284,2303,2323,2343,2363,2383,2403,2423,2444,2464,2483,2501,2522,2540,2560,2579,2598,2617,2638,2657,2677,2696,2715,2735,2756,2776,2795,2814,2833,2851,2870,2888,2908,2927,2947,2967,2986,3005,3025,3045,3064,3084,3104,3124,3145,3164,3182,3202,3220,3238,3256,3276,3295,3315,3335,3354,3374,3394,3412,3432,3450,3469,3488,3507,3527,3546,3567,3587,3606,3625,3645,3664,3684,3703,3722,3741,3759,3779,3798,3818,3839,3859,3879,3898,3918,3937,3957,3977,3997,4016,4035,4054,4073,4093,4112,4130,4150,4169,4189,4208,4227,4246,4264,4285,4305,4323,4343,4363,4382,4402,4422,4441,4461,4481,4500,4520,4539,4558,4577,4597,4617,4637,4655,4674,4693,4713,4731,4751,4771,4790,4809,4827,4846,4865,4885,4904,4925,4945,4963,4982,5002,5021,5039,5058,5076,5095,5112,5131,5150,5169,5188,5207,5226,5247,5267,5286,5304,5323,5340,5357,5376,5396,5416,5436,5455,5473,5492,5511,5529,5548,5567,5586,5605,5624,5644,5663,5682,5702,5721,5741,5760,5779,5798,5816,5835,5854,5873,5892,5912,5931,5949,5967,5987,6006,6024,6043,6062,6081,6099,6118,6138,6158,6176,6195,6214,6233,6252,6271,6290,6308,6328,6348,6366,6384,6403,6422,6441,6459,6478,6497,6516,6535,6554,6573,6593,6612,6631,6649,6668,6687,6706,6726,6746,6766,6786,6805,6824,6842,6860,6879,6898,6916,6934,6953,6971,6990,7008,7026,7045,7064,7084,7103,7122,7140,7159,7177,7196,7215,7234,7253,7272],{"_path":661,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":662,"content":670,"config":678,"_id":681,"_type":14,"title":682,"_source":16,"_file":683,"_stem":
684,"_extension":19},"/en-us/blog/2019-year-in-review",{"title":663,"description":664,"ogTitle":663,"ogDescription":664,"noIndex":6,"ogImage":665,"ogUrl":666,"ogSiteName":667,"ogType":668,"canonicalUrls":666,"schema":669},"Highlights from 2019","2019 was a big year for GitLab! We look back on our achievements and growth from the past year.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665651/Blog/Hero%20Images/gitlab-holiday-2019-blog-cover.png","https://about.gitlab.com/blog/2019-year-in-review","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Highlights from 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-01-09\",\n      }",{"title":663,"description":664,"authors":671,"heroImage":665,"date":673,"body":674,"category":675,"tags":676},[672],"Sara Kassabian","2020-01-09","\n\nAt GitLab, we’re going into 2020 with big energy. 🙌 Take a look at the 2019 milestones that laid a solid foundation for the company as we gear up for our IPO, planned for November 2020.\n\nIn 2019, our company more than doubled in size as we hired more talented folks, many of whom helped us move our product closer to being a true [multicloud solution](/topics/multicloud/). But the core of GitLab is our open source community, and in 2019 our community made plenty of valuable contributions in merge requests, feature fixes, and security checks! 
Explore some of the 2019 highlights for the GitLab product, community, and company.\n\n- [Product highlights](#product)\n- [Community highlights](#community)\n- [Company highlights](#company)\n\n\n## Product\n\nWe introduced many exciting new features to help our GitLab product better serve the needs of our users.\n\n### Multi-level child epics make project management a breeze\n\nBefore our 11.7 release, epics were limited to a two-level structure, but [in 11.7 we introduced multi-level child epics](/releases/2019/01/22/gitlab-11-7-released/#multi-level-child-epics), so you can now have an ancestor epic that contains up to five levels of child epics, as well as issues. This feature allows longer-term work strategies to be defined in ancestor epics, with strategy and deliverables being articulated in the lower tiers.\n\n\n\n### Auto-renew certs using Let’s Encrypt\n\nOne of our most highly-requested features was the introduction of a custom domain in GitLab pages [that automates HTTPS certificate renewals.](https://gitlab.com/gitlab-org/gitlab-foss/issues/28996) We delivered in 12.1 by integrating with Let’s Encrypt to transition this process from being manual to automated.\n\n### Totally buggin’: Track errors using Sentry\n\nUsing Sentry, our users can get more visibility into their entire stack, making it faster and easier to identify and remediate bugs in your code. 
[Read this blog post to dive deeper into how our integration with Sentry works](/blog/sentry-integration-blog-post/) or watch the video below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/KUHk1uuXWhA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Accelerate delivery using scoped labels\n\n[We created the scoped labels in 11.10](/blog/issue-labels-can-now-be-scoped/), making it simpler for users to customize workflows and accelerate delivery.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Great news, friends! Issue labels can now be scoped 😍\u003Cbr>\u003Cbr>Scoped Labels make it possible for teams to define a basic custom field that avoids confusion and cleans up issue lists ✔️\u003Ca href=\"https://t.co/U2T9BBIgBs\">https://t.co/U2T9BBIgBs\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1141782522013134848?ref_src=twsrc%5Etfw\">June 20, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWatch the video below to see two use cases for scoped labels.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/4BCBby6du3c\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Merge trains keep your pipeline running\n\nBroken master is a developer’s worst enemy. 
We want our users to keep their pipelines moving, which is [why we created merge trains to keep your pipelines in the green](/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests/).\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">GitLab 12.1 released with Parallel Merge Trains, Merge Requests for Confidential Issues, Automated Let’s Encrypt certificates for GitLab Pages and much more! Enjoy! 🎉🙌🚀\u003Ca href=\"https://t.co/oRp7YF9mmo\">https://t.co/oRp7YF9mmo\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1153319179266809857?ref_src=twsrc%5Etfw\">July 22, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### CE and EE are in a single codebase\n\nIn August, [we officially migrated GitLab CE and GitLab EE to a single codebase](/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition/). Keeping CE and EE in their own repositories made the development process more complex than was necessary, and by moving to a single codebase we simplified a problem that was becoming more complicated over time. A migration of this size wasn’t a simple process. [Our blog post dives into more detail about how we managed the migration](/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition/).\n\n### Multicloud: This is the way\n\n#### Create and deploy to an EKS cluster\n\nGitLab is designed to be cloud-agnostic and in the spirit of multicloud, [we added an EKS integration to 12.5](/releases/2019/11/22/gitlab-12-5-released/#easily-create-and-deploy-to-an-eks-cluster). Now, users can create and deploy an EKS cluster by selecting the EKS option on the GitLab clusters page rather than having to build the integration from scratch. 
Watch the demo below to see how it works, or [read our documentation page](/releases/2019/11/22/gitlab-12-5-released/#easily-create-and-deploy-to-an-eks-cluster).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/DGvPEJUnXME\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n#### Deploy to any cloud with GitLab CI/CD\n\nLearn more about how [GitLab CI/CD makes it possible to work with any cloud provider](/blog/gitlab-ci-cd-is-for-multi-cloud/). Study our [Guide to the Cloud](/resources/guide-to-the-cloud/) to become an expert in this topic.\n\nOther notable accomplishments include:\n\n*   [How our delivery team used the “boring solution” to migrate GitLab.com to CI/CD](/blog/gitlab-journey-to-cicd/).\n*   The introduction of [instance-level Kubernetes](https://docs.gitlab.com/ee/user/instance/clusters/).\n*   [DAG pipelines](/releases/2019/08/22/gitlab-12-2-released/#directed-acyclic-graphs-dag-for-gitlab-pipelines), which allow certain jobs to be completed in a non-consecutive order between stages.\n\n## Community\n\nIn 2019, GitLab benefitted from a highly engaged and collaborative community of contributors.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">While GitLab the company is growing quickly, we also have over 2500 contributors to GitLab from the wider community. 
\u003Cbr>\u003Cbr>Those contributors are providing over 200 contributions per month 💥\u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> \u003Ca href=\"https://t.co/qrSCCAKtpE\">pic.twitter.com/qrSCCAKtpE\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1181889359492108295?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Code contributions soared\n\nIn 2018, we had 447 code contributors create 1,608 merge requests. [Our numbers nearly doubled in 2019](https://gitlab.com/gitlab-com/www-gitlab-com/issues/6075#note_262597822) with an astounding 861 code contributors creating 2,437 merge requests (as of Dec. 18 2019). This marks more than 50% year-over-year growth in merged MRs for the wider community. We can’t wait to see what you folks have in store for us in 2020!\n\n## One million merge requests\n\nIn March 2019, our community broke more records by [submitting one million merge requests to GitLab.com](/blog/1-mil-merge-requests/) in a month. In fact, the number of new MRs per active user increased by 40% year-over-year (May 2019 vs. 
May 2018).\n\nThe majority of these contributions were part of private projects on GitLab.com, indicating there is the potential for _even more growth_ in the New Year if our contributors resolve to submit to some of our public projects too.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">\u003Ca href=\"https://t.co/C4mACZpLWf\">https://t.co/C4mACZpLWf\u003C/a> received a record 1 million merge requests in March 2019 😱\u003Ca href=\"https://t.co/Ii57tcSbq1\">https://t.co/Ii57tcSbq1\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1136714388914757633?ref_src=twsrc%5Etfw\">June 6, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Our bug bounty program goes public\n\nOur bug bounty program launched in 2017 but was limited to the top 10% of HackerOne contributors. But in 2019, we elected to accelerate our efforts by making the program public – and our community did not disappoint! In the first seven weeks of our program, 42% of all reporters were first-time contributors and 64% of all of the reports we received came from folks new to the GitLab program.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">&quot;We’re proud to see the benefits and value being generated by our bug bounty program and specifically our reporter community.&quot;\u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@GitLab\u003C/a> shares where their team is succeeding and focusing on improvement after moving to a public program. 
Fantastic job!\u003Ca href=\"https://t.co/iZ7rYqKmmq\">https://t.co/iZ7rYqKmmq\u003C/a> \u003Ca href=\"https://t.co/7WcrPWIMbQ\">pic.twitter.com/7WcrPWIMbQ\u003C/a>\u003C/p>&mdash; HackerOne (@Hacker0x01) \u003Ca href=\"https://twitter.com/Hacker0x01/status/1154159537596899329?ref_src=twsrc%5Etfw\">July 24, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nThank you to all of our reporters who helped make our product and platform even more secure.\n\n## Company\n\nJust like any start-up, GitLab came from humble beginnings, but in 2019 we’ve had more and more organizations adopt our tool as their all-in-one DevOps solution, and our team, funding, and corporate events have grown to accommodate the demand.\n\n### GitLab valued at $2.75 billion\n\nOur plans for a 2020 IPO are off to a roaring start! 🚀 In less than a year, we’ve more than doubled our company’s valuation from $1.1 billion in 2018 to $2.75 billion in 2019, after raising $268 million in September 2019. The money comes from existing funders such as Goldman Sachs as well as nine investors that are brand new to GitLab.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">GitLab (YC W15) hauls in $268M Series E on 2.75B valuation. Congrats to the GitLab team! 
\u003Ca href=\"https://t.co/8tfxnfu3YN\">https://t.co/8tfxnfu3YN\u003C/a>\u003C/p>&mdash; Y Combinator (@ycombinator) \u003Ca href=\"https://twitter.com/ycombinator/status/1173998823850545157?ref_src=twsrc%5Etfw\">September 17, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWe’ll be reinvesting all of that money into making our DevOps platform the best in its class, bolstering its monitoring, security, and planning capabilities.\n\n### We’re (always) hiring!\n\nSince the company launched in 2015, our headcount has more than doubled each year. At the end of January 2019, we had roughly 452 team members at GitLab but as of Jan. 9, 2020 we've grown to 1,137 team members and counting.\n\n\u003Cembed width=\"100%\" height=\"100%\" src=\"\u003C%= signed_periscope_url(chart: 6551186, dashboard: 503779, embed: 'v2') %>\">\n\nThe chart embedded above provides an interactive look at the growth of our company.\n\nExplosive growth in team members is exciting, but when it comes time to organize GitLab Contribute, our annual event for team members and the wider GitLab community, there simply is no cookie cutter solution for accommodating more than a thousand people. Learn more about [how our corporate events team has mastered the persistent challenge of scale](/blog/how-we-scaled-our-summits/) when planning GitLab Contribute.\n\n### GitLab heads down to the bayou\n\nSpeaking of Contribute... in May 2019, more than 500 GitLab team members met in New Orleans for our yearly summit. In between bites of beignets, our [GitLab team managed to meet, mingle, and ship lots of code](/blog/contribute-wrap-up/). 
If you missed us in NOLA, [catch us in Prague in 2020](/events/gitlab-contribute/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/xdtPNXtkBhE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo directed and produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Just arrived at \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/Contribute?src=hash&amp;ref_src=twsrc%5Etfw\">#Contribute\u003C/a>! Everything is so amazing the energy is palpable. Thankful to the Contribute Team for all their hard work. Onwards to dinner and debriefing with ma peeps now! \u003Ca href=\"https://twitter.com/hashtag/NOLA?src=hash&amp;ref_src=twsrc%5Etfw\">#NOLA\u003C/a> \u003Ca href=\"https://t.co/NmQ1PtLdkl\">pic.twitter.com/NmQ1PtLdkl\u003C/a>\u003C/p>&mdash; Priyanka Sharma (@pritianka) \u003Ca href=\"https://twitter.com/pritianka/status/1126243914762027008?ref_src=twsrc%5Etfw\">May 8, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### The future of DevOps starts here\n\nThe best way to get a bird’s eye view into operations and decision-making at a rapidly growing company is to start from the highest point. GitLab pioneered a [new CEO shadow program](/handbook/ceo/shadow/) designed to help current and future leaders of GitLab get a comprehensive overview of how our organization operates. 
The task of a CEO shadow is simple: Join GitLab CEO [Sid Sijbrandij](/company/team/#sytses) at his home office in San Francisco and follow him to relevant meetings (digitally and IRL).\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">It&#39;s been an incredible experience getting to \u003Ca href=\"https://twitter.com/hashtag/contribute?src=hash&amp;ref_src=twsrc%5Etfw\">#contribute\u003C/a> to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a>! I ❤️ the story my graph tells. Now, which should I be most proud of: \u003Cbr>\u003Cbr>1. Becoming an intermediate-level Git user\u003Cbr>2. Participating in the CEO Shadow Program\u003Cbr>3. Taking 5 wks of vacation last year (clear winner) \u003Ca href=\"https://t.co/hN7kcxEHay\">pic.twitter.com/hN7kcxEHay\u003C/a>\u003C/p>&mdash; Erica Lindberg (@EricaLindberg_) \u003Ca href=\"https://twitter.com/EricaLindberg_/status/1125885748878536705?ref_src=twsrc%5Etfw\">May 7, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n[Erica Lindberg](/company/team/index.html#Lindberg), Global Content Manager, kicked off the CEO shadow program back in April 2019, but since then we’ve had a rotating schedule of CEO shadows that can drop in and drop out with ease and efficiency. [Get an inside look at the life of a CEO shadow by reading Erica's blog post](https://medium.com/gitlab-magazine/acquisitions-growth-curves-and-ipo-strategies-a-day-at-khosla-ventures-2762eb02c83a) and [learn more about the logistics and enrollment criteria](/handbook/ceo/shadow/#expenses-travel-and-lodging).\n\n### GitLab launches Commit, our first user conference\n\n🥳 Contribute is for our team members and community but [GitLab Commit](/events/commit/) is all about our users. 
We kicked off Commit in London and Brooklyn, inviting GitLab users to join us for a day of DevOps inspiration and learning.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003C!-- first tweet -->\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Hot take: Auto \u003Ca href=\"https://twitter.com/hashtag/DevOps?src=hash&amp;ref_src=twsrc%5Etfw\">#DevOps\u003C/a> cures shell script madness. And \u003Ca href=\"https://twitter.com/hashtag/GitOps?src=hash&amp;ref_src=twsrc%5Etfw\">#GitOps\u003C/a> is just another way to say git is the source of truth. Wisdom from \u003Ca href=\"https://twitter.com/digitalocean?ref_src=twsrc%5Etfw\">@digitalocean\u003C/a> Developer Relations Mgr. \u003Ca href=\"https://twitter.com/eddiezane?ref_src=twsrc%5Etfw\">@eddiezane\u003C/a> &amp; \u003Ca href=\"https://twitter.com/NMFinancial?ref_src=twsrc%5Etfw\">@NMFinancial\u003C/a> Senior Engineers Kyle Persohn, &amp; Sean Corkum \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> Commit. \u003Ca href=\"https://t.co/4YI5WvMRzD\">pic.twitter.com/4YI5WvMRzD\u003C/a>\u003C/p>&mdash; The New Stack (@thenewstack) \u003Ca href=\"https://twitter.com/thenewstack/status/1174035665803186176?ref_src=twsrc%5Etfw\">September 17, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C!-- second tweet -->\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">I started speaking at conferences 11 years ago, and that&#39;s the time I had to wait for an opportunity to present my first talk in English. Thanks \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> for having me at \u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> last week, for amazing days in London. So much learning, new friends and good memories. 
\u003Ca href=\"https://t.co/OOchLmelpe\">pic.twitter.com/OOchLmelpe\u003C/a>\u003C/p>&mdash; Mario García (@mariogmd) \u003Ca href=\"https://twitter.com/mariogmd/status/1183450205280186368?ref_src=twsrc%5Etfw\">October 13, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C!-- third tweet -->\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">And that’s a wrap! Thank you, London for an amazing time at \u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a>. We loved hosting our European \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> conference with you. Can’t wait to visit again and bring back some GitLab love to the land of the Brits 💜🇬🇧🧡 \u003Ca href=\"https://t.co/XLZiB2Dgm1\">pic.twitter.com/XLZiB2Dgm1\u003C/a>\u003C/p>&mdash; Priyanka Sharma (@pritianka) \u003Ca href=\"https://twitter.com/pritianka/status/1182254193324806151?ref_src=twsrc%5Etfw\">October 10, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nJoin us in San Francisco on January 14 for our first Commit event of 2020.\n\nThank you to all the folks that contributed to making 2019 such a smashing success and cheers to what’s in store for 2020!\n\nAlso, thank you to Social Marketing Manager [Wil Spillane](/company/team/#wspillane) for helping source the social media posts featured in this blog post.\n\n\n","news",[677,267,9],"features",{"slug":679,"featured":6,"template":680},"2019-year-in-review","BlogPost","content:en-us:blog:2019-year-in-review.yml","2019 Year In 
Review","en-us/blog/2019-year-in-review.yml","en-us/blog/2019-year-in-review",{"_path":686,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":687,"content":693,"config":701,"_id":703,"_type":14,"title":704,"_source":16,"_file":705,"_stem":706,"_extension":19},"/en-us/blog/5-leadership-lessons-as-product-design-manager",{"title":688,"description":689,"ogTitle":688,"ogDescription":689,"noIndex":6,"ogImage":690,"ogUrl":691,"ogSiteName":667,"ogType":668,"canonicalUrls":691,"schema":692},"5 Leadership Lessons as Product Design Manager","Shortly after my promotion to Staff Product Designer, I was given the opportunity to act as Product Design Manager for CI/CD. These are some of the lessons I learned on design leadership at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/5-leadership-lessons-as-product-design-manager","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Leadership Lessons as Product Design Manager\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rayana Verissimo\"}],\n        \"datePublished\": \"2021-01-05\",\n      }",{"title":688,"description":689,"authors":694,"heroImage":690,"date":696,"body":697,"category":698,"tags":699},[695],"Rayana Verissimo","2021-01-05","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nGitLab has [a number of career development opportunities](https://about.gitlab.com/handbook/engineering/career-development/), and during my time as an individual contributor (IC) I intentionally leaned towards leadership.\n\nIn October 2020, my manager decided to leave the organization and asked if I was ready to take on her position as Product Design Manager for CI/CD. 
The [acting manager](https://about.gitlab.com/handbook/engineering/career-development/#acting-manager) is an interim position dedicated to ICs experimenting with the role as they work on determining their career path. I took this role and started reporting directly to the Director of Product Design.\n\nIn parallel, I was promoted to Staff Product Designer! Wait, there's more: my team, Release Management, [was dissolved](https://gitlab.com/gitlab-com/Product/-/issues/1698) and I was assigned as shared resource between the Runner and Testing teams; meaning I had to handoff all my design work and onboard two new stage groups. I remember feeling overwhelmed and excited at the same time. I also remember thinking that growth is supposed to be uncomfortable, and if I had to go through all these new challenges in my professional life I was glad it was at GitLab.\n\nWhat follows are a few lessons I learned in my (ongoing) stint as the acting Product Design Manager for CI/CD. Eventually, I aim to become a manager again, and I hope to remember these lessons and learn even more.\n\n## 1. Define what success looks like for your new role\n\nI knew I had to trace a plan in order to effectively perform my new roles. I set a series of goals that were people and process focused, and that I wanted to eventually feed back into my personal development plan.\n\nAs an acting manager, I first focused on learning how I could help sustain a sense of stability and trust in the team. Performance Reviews and career growth conversations were my top concerns (meaning, learning the _what_ and _how_ of it). Another key element of success was to establish relationships with counterparts in order to understand what they care about, how they collaborate with UX, and what concerns they have. 
This foundational work provided insights on how I could help myself and others, as well as assess if what I thought was important really needed my attention.\n\nAs a Staff Designer, my plan was to set boundaries to the IC work, specifically regarding all the tactical design I knew I would not be able to deliver and communicate that soon and often to people around me.\n\nBecause we use GitLab for _everything_, I also took the opportunity to create some artifacts that could help automate the onboarding and planning for new acting managers, as well as a plan for my design handoff and onboarding:\n\n-   I defined my [quarterly goals](https://gitlab.com/rayana/plan/-/blob/master/goals/quarterly-goals.md) around my new roles and shared them with my manager and counterparts.\n-   I created an [issue template](https://gitlab.com/gitlab-com/people-group/Training/-/blob/master/.gitlab/issue_templates/acting-manager.md) for onboarding new acting managers.\n-   I made a [plan to transition all my design work](https://gitlab.com/groups/gitlab-org/-/epics/4815) and assigned it to new DRIs.\n-   I used my Group Manager's onboarding issues to get up to speed with understanding the [Runner](https://gitlab.com/gitlab-com/Product/-/issues/1685) and [Testing](https://gitlab.com/gitlab-com/Product/-/issues/1687) groups' visions, roadmaps, competitive landscapes, team health, processes, and partnerships across the organization.\n\n## 2. Managing your schedule is essential\n\nI inherited my previous manager's meetings, meaning my calendar was impossible to manage for a couple of weeks. A packed schedule means I am likely to be launched into an anxiety spiral. Because I am both IC and manager, this created a situation where I was splitting my brain and my attention trying to do too many things at once.\n\n[Darby Frey](https://about.gitlab.com/company/team/#darbyfrey), Sr. Engineering Manager for Verify, shared some kind words with me. 
He reminded me that I wouldn't be able to do everything I want or need to. _\"It’s impossible to do two full-time jobs. My advice is to do what you can; time-box things; set priorities for the calls; be deliberate about what you choose not to do.\"_\n\n-   I kept creating [a weekly plan](https://gitlab.com/rayana/plan/-/tree/master/tasks/2020) with my priorities. This helped me stay grounded and acknowledge I could only deliver so much in a week.\n-   I started being intentional about focus time by blocking my calendar, forcing myself to work on specific items rather than \"freestyle\" my tasks. This was in particular very painful.\n-   I picked up the habit of time-boxing my work using [Forest](https://www.forestapp.cc/) - a popular productivity app that helps you stay focused. This made me realize that working 3-4 hours without a break was unsustainable and that 30-minute to 1-hour blocks of focused work gives me a greater sense of accomplishment and is healthier.\n\nMy mantra in the last few months has been: be kind to yourself. I believe I still have a long way to go. In the meantime, these resources have helped me quite a bit:\n\n-   [Remote work: 9 tips for eliminating distractions and getting things done](https://about.gitlab.com/blog/eliminating-distractions-and-getting-things-done/)\n-   [7 Tips for Managing Your Schedule Like a Pro](https://www.entrepreneur.com/article/243962)\n\n## 3. Design Managers at GitLab are facilitators\n\nI thought I had a pretty good sense of what being a manager meant... until I became one myself. I've always enjoyed coaching, but there's a huge difference between being a buddy and a manager.\n\nFrom career development to design critique, I believe the true role of managers in the UX department is to facilitate great work and make sure our designers are being supported. I learned that this means getting to know what each designer needs individually - and building that relationship is a job of its own. 
[Servant-leader qualities](https://about.gitlab.com/company/culture/all-remote/being-a-great-remote-manager/#servant-leader-qualities) are especially true if you are now [managing people who used to be your peers](https://hbr.org/2012/12/how-to-manage-your-former-peer). There was certainly a change in the dynamics for me, but the end goal remained the same: wanting others to succeed.\n\nAn upside of being acting manager is spending more time consulting with the designers and following their work. I started having a better sense of what people are prioritizing and (more importantly) what type of support they need. This overview will be helpful once I transition back fully into my Staff role. Sure, the fact that I had previous context on different product areas was great, but now I understand why design managers are not able to dive deep into everyday design tasks. This is why they listen and facilitate instead of coming up with solutions. Product Designers are the experts. That being said, I came to the conclusion that I'd rather be a manager that takes a leap of faith than being the person watching over someone's shoulders.\n\n[Valerie Karnes](https://about.gitlab.com/company/team/#vkarnes), Director of Product Design, taught me that you need to make confident decisions with the context you have. That also means trusting people so they can make their own decisions and move forward.\n\n-   Keep asking how you can better support the team. I do this in every 1:1 by asking \"how can I be a better manager for you?\" or \"how can I help you this week?\" People have different feelings about asking for help and I recognize I'm busy, so I find it important to leave that door always open.\n-   Adapt to what each report needs. Some conversations will be harder than others, so make sure you are listening.\n-   Seek ongoing feedback and support from your manager and peers. 
I meet with [Justin Mandell](https://about.gitlab.com/company/team/#jmandell), Product Design Manager, once every two weeks to talk about people management. I also connected with people who were once interim managers to get to know what challenges they faced and how they solved them.\n-   Be transparent and communicate that you are learning on the job: you don't know everything, and you can't possibly do everything right. If you're in a situation like me where you can't be a manager 100%, let people know that.\n\n## 4. Manager is a different career\n\nAs a new manager, I had to redefine what I call \"results.\" You go from being completely independent to being responsible for the team's output. As an IC, I can measure my output based on how many things I get out the door in a milestone. The usual metrics no longer apply when you're a manager. This can mess with your sense of self-worth, which is being tied for so long to visible, tactical design. Many days I sat in front of the computer feeling I wasn't moving the needle at all. I had to learn on the fly how to get satisfaction from a new way of operating.\n\nMy results now translate into being a network builder by thinking strategically, understanding and communicating the overall company direction, and aligning people's sense of purpose with where the company is going. You can't just pinpoint one specific deliverable that exemplifies all that.\n\nThe rewarding side of managing is watching the CI/CD designers shine: from communicating someone got a discretionary bonus for doing amazing work and exemplifying our company values, to giving positive feedback on a performance review, and helping people figure their career growth plans. This new approach to results made me experience a deep sense of pride for other people's accomplishments. Almost like magic moments of bliss. ✨\n\nOn the other hand, I had to handle my own disappointment in not being involved at the level I could help with the hands-on design work. 
I was unable to deliver feature proposals at the same pace as before. Even onboarding Testing and Runner proved to be a challenge; I couldn't do it at the speed I _wanted to_.\n\nI learned that becoming a manager is not an extension of my IC career: being a manager is either/or. If I want to be a good manager, I want to have the time to be a consistent manager.\n\n-   Ask yourself: are you ready to help design other people's careers instead of features?\n-   There’s a level of separation when you become a manager and you need to be comfortable with that. I found myself feeling isolated from the things that give me joy, like tactical design and stage group rituals.\n\n## 5. Share your learnings\n\nThe final lesson is a small one, but it can have a deep impact on our team of designers. Management opportunities are created based on [merit and company need](https://about.gitlab.com/handbook/engineering/career-development/#ux-department), and it is imperative that designers understand what challenges they might face and what the path to management looks like. Keep sharing what you've done and how you've done it to succeed as an IC. I became more self-aware of my accomplishments and I learned that people are craving actionable guidance. Becoming a manager is a beacon of hope!\n\nI am privileged for having the chance to experience the manager role before making the transition. [Leadership](https://about.gitlab.com/handbook/leadership/) is a long-term learning and I know I have a ton to learn. I hope the lessons I shared are also valuable to you during your own journey.\n\nThank you for reading and thank you to GitLab for enabling my growth. 
👣\n","unfiltered",[700,9],"UX",{"slug":702,"featured":6,"template":680},"5-leadership-lessons-as-product-design-manager","content:en-us:blog:5-leadership-lessons-as-product-design-manager.yml","5 Leadership Lessons As Product Design Manager","en-us/blog/5-leadership-lessons-as-product-design-manager.yml","en-us/blog/5-leadership-lessons-as-product-design-manager",{"_path":708,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":709,"content":715,"config":724,"_id":726,"_type":14,"title":727,"_source":16,"_file":728,"_stem":729,"_extension":19},"/en-us/blog/a-deep-dive-into-the-security-analyst-persona",{"title":710,"description":711,"ogTitle":710,"ogDescription":711,"noIndex":6,"ogImage":712,"ogUrl":713,"ogSiteName":667,"ogType":668,"canonicalUrls":713,"schema":714},"A deep dive into the Security Analyst persona","See how we created our new Security Analyst persona, and how we are already putting it to use.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663736/Blog/Hero%20Images/a-deep-dive-into-the-security-analyst-persona.jpg","https://about.gitlab.com/blog/a-deep-dive-into-the-security-analyst-persona","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A deep dive into the Security Analyst persona\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andy Volpe\"}],\n        \"datePublished\": \"2019-02-12\",\n      }",{"title":710,"description":711,"authors":716,"heroImage":712,"date":718,"body":719,"category":720,"tags":721},[717],"Andy Volpe","2019-02-12","\nAs GitLab grows, so does our need for new, more area-specific personas. Recently, as part of our [effort to create personas](/blog/personas-and-empathy-building/), I was given a chance to craft one. 
As the UX designer for [the Secure team](/handbook/engineering/development/sec/secure/) here at GitLab, I jumped at the opportunity to learn more about security professionals, and how we may create products and features to meet their needs. Throughout the entire process, I gained a greater sense of empathy and a deeper understanding of the needs, goals, and pain points of security professionals. The result was our new [Security Analyst Persona, Sam](/handbook/product/personas/#sam-security-analyst). However, I will add a caveat that this is not the end of the process, but the beginning of how we can better support security professionals with new features and functionality that address their specific needs. You can peruse the highlights and the persona itself below, and let us know what you think by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\n## The research\n\nHere are some takeaways from the [10 interviews](https://gitlab.com/gitlab-org/ux-research/issues/97) I conducted to create the Security Analyst persona.\n\nWe’ve learned that the Security Analyst is a bit of a generalist when it comes to their day-to-day tasks. From the research, I found that there isn’t one specific task that defines their day, but a grouping of tasks under the umbrella of security. I’ve written the summary of the persona to reflect the somewhat general nature of the Security Analysts' role:\n\n>\"I wear lots of hats, but the majority of my time is spent monitoring and flagging events, running down high-priority tasks and working with other teams to implement new systems.\"\n\n### What motivates a Security Analyst?\n\nSecurity Analysts strive for order in the chaos and, based on our research, are taking steps to achieve that order. 
One specific example:\n\n>When I’m monitoring my dashboards, I want to see everything I am monitoring in one tool, so I can do my job easier and more efficiently.\n\nMoving between different tools and dashboards was identified as a significant problem area for Security Analysts. They found it hard to create a workflow that was conducive to remediating security issues while having to work across multiple tools.\n\nAnother motivation I found during the research was that Security Analysts desire to be more proactive than reactive in their work. I’ve summarized this by adding the objective below:\n\n>When security testing, I want to be more proactive than reactive, so I can anticipate potential threats or vulnerabilities before the bad guys do.\n\nBy being more proactive or shifting left in their work, Security Analysts are able to identify and remediate potential vulnerabilities before they become a problem or even lead to an attack.\n\n### What are some of the frustrations Security Analysts have?\n\n>I’m frustrated I don’t have the resources to complete this project to its specifications.\n\nand\n\n>I’m frustrated when I know how to fix a security issue but the red tape at my company doesn’t allow me to in a timely manner.\n\nA common theme seen throughout the research was that of constrained resources and time. Often we found that security teams were small in comparison to other teams within their organization. This resource discrepancy leads to work being done at such a pace that the project can’t be completed to its specifications or in a timely manner.\n\n### How are we using the security Analyst persona at GitLab?\n\nWe are all-in on making the Security Persona a first-class persona here at GitLab. 
Recently we launched the [Group-level Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/), which allows security professionals to monitor all their projects, in one view, for vulnerabilities, and gives them the ability to take action on those vulnerabilities right from the dashboard itself.\n\nAside from security dashboards, we are constantly dreaming up more security features and enhancements that will help users keep their instances, groups, and projects secure. You can [see our roadmap here](/direction/#future-releases) for more on what's coming.\n\n## The persona\n\n![Sam, Security Analyst persona](https://about.gitlab.com/images/blogimages/security-analyst-persona.png){: .shadow.center}\n\nKeep an eye out for the rest of our series on the [new personas](/handbook/product/personas/)!\n\n[Photo](https://unsplash.com/photos/z55CR_d0ayg) by [Andrew Neel](https://unsplash.com/@andrewtneel) on Unsplash\n{: .note}\n","security",[722,9,720,700,723],"testing","workflow",{"slug":725,"featured":6,"template":680},"a-deep-dive-into-the-security-analyst-persona","content:en-us:blog:a-deep-dive-into-the-security-analyst-persona.yml","A Deep Dive Into The Security Analyst Persona","en-us/blog/a-deep-dive-into-the-security-analyst-persona.yml","en-us/blog/a-deep-dive-into-the-security-analyst-persona",{"_path":731,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":732,"content":738,"config":746,"_id":748,"_type":14,"title":749,"_source":16,"_file":750,"_stem":751,"_extension":19},"/en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition",{"title":733,"description":734,"ogTitle":733,"ogDescription":734,"noIndex":6,"ogImage":735,"ogUrl":736,"ogSiteName":667,"ogType":668,"canonicalUrls":736,"schema":737},"Update: Why GitLab uses a single codebase for Community and Enterprise editions","Dive into our decision to switch GitLab over to a single codebase as we review some of the benefits and challenges. 
Learn more here!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671631/Blog/Hero%20Images/merge-ce-ee-codebases.jpg","https://about.gitlab.com/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update: Why GitLab uses a single codebase for Community and Enterprise editions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yorick Peterse\"}],\n        \"datePublished\": \"2019-08-23\",\n      }",{"title":733,"description":734,"authors":739,"heroImage":735,"date":741,"body":742,"category":743,"tags":744},[740],"Yorick Peterse","2019-08-23","\n\nIn [\"GitLab might move to a single Rails\ncodebase\"](/blog/merging-ce-and-ee-codebases/), we announced that GitLab\nmight move to using a single codebase for GitLab Community Edition (CE) and\nGitLab Enterprise Edition (EE). Since then we have decided to continue moving\ntoward a single codebase. In this article, I highlight some of the challenges,\nrequired work, and steps remaining to complete the switch.\n\n## What is a codebase?\n\nWhat is a codebase, I hear you ask? Well, a codebase (which is at times spelled as code base) is essentially the entire collection of source \ncode that is required for a program or application to function properly. This can include things like configuration \nfiles, libraries, and other dependencies, in addition to the actual application code. The codebase is \ntypically stored in a single location, often within a source control repository, where multiple developers \ncan access and make contributions to it.\n\nMultiple developers can use and contribute to a single codebase, which is generally retained within a source control \nrepository. As such, it can assist with the backup and versioning of overlapping code \nmodifications/alterations. 
This can be especially important for larger projects that require a lot of coordination \nand communication between team members. With everyone working from the same codebase, it becomes easier \nto ensure that changes are made consistently and in a way that does not break the application.\n\n## Why GitLab uses a single codebase?\n\nPrior to using a single codebase, for years CE and EE used two different repositories for the Rails application.\nBy using separate repositories we could separate proprietary code from code that\nis free software. On the surface this seems like a good idea for different\nreasons (e.g., licensing), but over the years the drawbacks\nbegan to outweigh the benefits.\n\nWe [mention some of these drawbacks in a previous\narticle](/blog/merging-ce-and-ee-codebases/), but more or less they all\ncome down to the same core problem: It made the development process more complex\nthan necessary. For example, we ended up with around 150 merge requests spread\nacross CE and EE for a security release from several months ago. While the\nprocess of merging these merge requests is automated, we ran into a variety of\nissues (e.g. failing tests) that required manual intervention. We could have\nreduced the number of merge requests by half if we used a single repository,\ncreating less work for developers and release managers.\n\nToward the end of 2018, I felt that we were running out of time and had to do\nsomething about the separation of CE and EE. We had always tried to avoid\nmerging the two repositories due to the complexity and time involved, but it\nstarted to become more and more clear we had no other option. [Marin\nJankovski](/company/team/#maxlazio), Delivery engineering manager, and I made a\nplan to merge the two repositories. Marin wrote a [design\ndocument](/handbook/engineering/infrastructure/library/merge-ce-ee-codebases/)\nthat outlined the details of it all. 
The design document showed what challenges\nwe faced, and gathered the critical support required for the largest engineering\nprojects at GitLab to date.\n\n## What is the difference between a codebase and a repository?\n\nThe basic difference between a codebase and a repository is that one is for old code and one is for new code. \n\nBut more specifically...\n\nA codebase can be either a public or private place to store large amounts of code that is actively being iterated on in a version control system, and typically stored in a source control repository in a version control system.\n\nA source code repository is where an archived version of the code being worked on is kept. It’s also a place to house documentation, notes, web pages, and other items in your repository. \n \n## Working toward a single codebase\n\nMoving to a single codebase is not something we can do overnight for a project\nthe size of GitLab. Workflows must be adapted, developers need to adjust to the\nnew setup, and automation requires extensive changes.\n\nOne of the biggest challenges from an engineering perspective was to come up\nwith a way to transparently remove proprietary code from GitLab when building a\nCE release. A naive approach might involve a script that removes known bits of\nproprietary code. While this might work for small projects that don't change\noften, this was not going to work for a project the size of GitLab.\n\nRuby provides us with a solution to this problem. In Ruby, you can create a\nmodule and inject it into another module or class. 
Once injected, the\nfunctionality of the module becomes available to the target module or class.\nThis is best illustrated with a simple example:\n\n```ruby\nclass Person\n  def initialize(name)\n    @name = name\n  end\n\n  def name\n    @name\n  end\nend\n\nmodule Greet\n  def greet\n    \"Hello #{name}\"\n  end\nend\n\nPerson.include(Greet)\n\nalice = Person.new('Alice')\n\nalice.greet # => \"Hello Alice\"\n```\n\nHere we define a class `Person`, followed by a module that is used to create a\nmessage greeting a person. Next, we include it into the `Person` class, at which\npoint we can use the module's methods for instances of the `Person` class. The\nresult is the message \"Hello Alice.\"\n\nWhile this example is not exciting, using a setup like this allows us to\nmove proprietary code to separate modules, and inject these modules when GitLab\nEE is used. For GitLab CE, we would remove these modules, and the code injecting\nthese modules would have to disable itself transparently and automatically.\n\nGitLab EE has been using this setup since late 2016 with all EE modules residing\nin a separate \"ee\" directory, but in a limited number of places. This meant that\nin some places EE and CE code got mixed together, while in other places the two\nare separate. For example, we had code like this:\n\n```diff\n def lfs_upload_access?\n   return false unless project.lfs_enabled?\n   return false unless has_authentication_ability?(:push_code)\n+  return false if project.above_size_limit? || objects_exceed_repo_limit?\n\n   lfs_deploy_token? || can?(user, :push_code, project)\n end\n```\n\nHere EE added a line into an existing method without using a separate module,\nmaking it difficult to remove the EE-specific code when for CE.\n\nBefore we could move to a single codebase, we had to separate EE-specific code from code shared between CE and EE. Due to the amount\nof work necessary, we divided the work into two departments: backend and\nfrontend. 
For every department we created issues outlining the work to do for\nthe various parts of the codebase. We even included the [exact lines of code\nthat had to change directly in the created\nissues](https://gitlab.com/gitlab-org/gitlab-ee/issues/9506), making it simple\nto see what one had to do. Each department also had an engineer assigned as the\nlead engineer, responsible for taking on the most difficult challenges.  [Filipa\nLacerda](/company/team/#FilipaLacerda), senior frontend engineer of Verify (CI)\nand Delivery, was in charge of frontend code. [As the Delivery backend engineer,\nI myself](/company/team/#yorickpeterse) was in charge of backend code.\n\nSome changes were small and took a short amount of time, with others were big\nand took weeks. One of my big challenges was to make sure CE and EE [use the same\ndatabase schema](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/26940),\nchanging just under 24,000 lines of code over a two-month period.\n\n>In total the work involved 55\ndifferent engineers submitting more than 600 merge requests, closing just under\n400 issues, and changing nearly 1.5 million lines of code\n\nFilipa spent a lot of time creating 168 frontend issues outlining specific tasks\nas well as submitting 124 merge requests to address the majority of these\nissues. 
Resolving some of these issues required getting rid of some\ntechnical debt first, such as [breaking up large chunks of code into smaller\nchunks](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/14592), and\ncoming up with a way [to create EE-specific Vue.js\ntemplates](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/25650).\n\nWhile Filipa and I took on the biggest challenges, in total the work involved 55\ndifferent engineers submitting more than 600 merge requests, closing just under\n400 issues, and changing nearly 1.5 million lines of code.\n\n## Moving toward a single codebase\n\nWith most of the work done, we could start looking into what project setup we\nwould use for a single codebase. We came up with three different approaches:\n\n### 1. Single codebase: moving all development into gitlab-ce\n\nAll code and development is moved into the gitlab-ce repository. The gitlab-ee\nrepository is archived, and a separate repository is set up as a mirror of\ngitlab-ce, called gitlab-foss. Proprietary code is removed from this mirror\nautomatically.\n\nSince most of GitLab's development takes place in the current gitlab-ce\nrepository, this setup would reduce the number of issues to move as well as merge requests to close. A downside of this approach is that clones of\nthe gitlab-ce repository will include proprietary code.\n\n### 2. Single codebase: moving all development into gitlab-ee\n\nAll code and development is moved into the gitlab-ee repository. The gitlab-ce\nrepository remains as is in terms of code, and will become a mirror of gitlab-ee. Like\nthe first option, proprietary code is removed from this mirror automatically.\n\nThis setup means that users cloning gitlab-ce don't end up with proprietary code\nin their copy of gitlab-ce.\n\n### 3. Single codebase: moving all development into a new repository\n\nWe set up an entirely new repository called \"gitlab,\" and move all code and\ndevelopment into this repository. 
The gitlab-ce and gitlab-ee repositories will\nbecome read-only. A mirror is set up (called \"gitlab-foss\") that mirrors the new\n\"gitlab\" repository, without including proprietary code.\n\n## Deciding which single codebase approach to take\n\n[Having evaluated all the benefits and\ndrawbacks](https://www.youtube.com/watch?v=LV_AHeL5sIo), we decided to go with\noption two: move development into gitlab-ee. This approach has several benefits:\n\n1. The code of the gitlab-ce repository remains as is, and won't include any\n   proprietary code.\n1. We do not need a separate mirror repository that does not include proprietary\n   code. Instead, we rename the gitlab-ce repository to \"gitlab-foss.\" We are\n   renaming the repository since having \"gitlab\" and \"gitlab-ce\" as project\n   names could be confusing.\n1. Users building CE from source don't end up with proprietary code in their\n   copy of the gitlab-ce repository.\n1. We keep the Git logs of both gitlab-ce and gitlab-ee, instead of losing the\n   logs (this depends a bit on how we'd move repositories around).\n1. It requires the least amount of changes to our workflow and tooling.\n1. Using a single project and issue tracker for both CE and EE makes it easier\n   to search for issues.\n\nIssues created in the gitlab-ce project will move to the gitlab-ee project,\nwhich we will rename to just \"gitlab\" (or \"gitlab-org/gitlab\" if you include the\ngroup name). This project then becomes the single source of truth, and is used\nfor creating issues for both the CE and EE distributions.\n\nMoving merge requests across projects is not possible, so we will close any open\nmerge requests. 
Authors of these merge requests will have to resubmit them to\nthe \"gitlab\" (called \"gitlab-ee\" before the rename) project.\n\nWhen moving issues or closing merge requests, a bot will also post a comment\nexplaining why this is done, what steps the author of a merge request has to\ntake, and where one might find more information about these procedures.\n\nPrior to the single codebase setup, GitLab community contributions would be submitted\nto the gitlab-ce repository. In the single codebase, contributions are instead\nsubmitted to the new gitlab repository (\"gitlab-org/gitlab\"). EE-specific code\nresides in a \"ee\" directory in the repository. Code outside of this directory\nwill be free and open source software, using the same license as the gitlab-ce\nrepository currently uses. This means that as long as you do not change anything\nin this \"ee\" directory, the only change for GitLab community contributions is the use\nof a different repository.\n\nOur current plan is to have a single codebase the first week of September.  GitLab 12.3 will be the first release based on a single codebase.\n\nUsers that clone GitLab EE and/or GitLab CE from source should update their Git\nremote URLs after the projects are renamed. This is not strictly necessary as\nGitLab will redirect Git operations to the new repository. 
For users of our\nOmnibus packages and Docker images nothing changes.\n\nThose interested in learning more about what went on behind the scenes can refer\nto the following resources:\n\n* [A video in which we discusses the benefits and drawbacks of the various\n  project setups](https://www.youtube.com/watch?v=LV_AHeL5sIo)\n* [The issue detailing the remaining work to do](https://gitlab.com/gitlab-org/gitlab-ee/issues/13304)\n* [A list of all the single codebase merge requests](https://gitlab.com/groups/gitlab-org/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&label_name%5B%5D=single%20codebase)\n\nCover image from [Unsplash](https://images.unsplash.com/photo-1512217536414-d92543c79ca1)\n{: .note}\n","engineering",[9,745],"open source",{"slug":747,"featured":6,"template":680},"a-single-codebase-for-gitlab-community-and-enterprise-edition","content:en-us:blog:a-single-codebase-for-gitlab-community-and-enterprise-edition.yml","A Single Codebase For Gitlab Community And Enterprise Edition","en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition.yml","en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition",{"_path":753,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":754,"content":760,"config":768,"_id":770,"_type":14,"title":771,"_source":16,"_file":772,"_stem":773,"_extension":19},"/en-us/blog/a-special-farewell-from-gitlab-dmitriy-zaporozhets",{"title":755,"description":756,"ogTitle":755,"ogDescription":756,"noIndex":6,"ogImage":757,"ogUrl":758,"ogSiteName":667,"ogType":668,"canonicalUrls":758,"schema":759},"A special farewell from GitLab’s Dmitriy Zaporozhets","A message from GitLab's co-founders","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670031/Blog/Hero%20Images/siddz.png","https://about.gitlab.com/blog/a-special-farewell-from-gitlab-dmitriy-zaporozhets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A special 
farewell from GitLab’s Dmitriy Zaporozhets\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"},{\"@type\":\"Person\",\"name\":\"Dmitriy Zaporozhets\"}],\n        \"datePublished\": \"2021-11-10\",\n      }",{"title":755,"description":756,"authors":761,"heroImage":757,"date":764,"body":765,"category":675,"tags":766},[762,763],"Sid Sijbrandij","Dmitriy Zaporozhets","2021-11-10","\n**Sid Sijbrandij**: Today, we shared the news with the team that my co-founder and the creator of GitLab the open source project, Dmitriy Zaporozhets, has made the decision to leave his position as Engineering Fellow at GitLab. Below, Dmitriy shares this news in his own words with the GitLab community.\n\n**Dmitriy Zaporozhets**: I remember when I told Sid, my co-founder and the CEO of GitLab, that I wanted to commit 10 years to GitLab from the time I started the project in October of 2011. It’s been an amazing journey over the last 10 years. And just last month, on October 14, 2021, I joined Sid at Nasdaq in New York City as GitLab became a public company. Today, I want to share with the GitLab community that I am stepping away from my position as an Engineering Fellow at GitLab Inc. I have fulfilled my 10 year vision, and I feel that I can step away with so much pride in what GitLab has become and so much faith in where GitLab is headed.\n\nBack in 2011, I was working as a software developer. I worked with version control everyday. There was no modern open source software to run on your server and I saw an opportunity to make something useful as a hobby project. I started the project with Valery Sizov, and I remember having coffee with him, discussing the challenges and improvements we wanted to make. It was such a great feeling to brainstorm something that we cared so deeply about. \n\nIn that first year, we grew a functional open source community around the project. I was quite surprised that so many people participated. 
That gave me energy and confidence to keep going with the project. Everytime someone contributed, I felt like I needed to put even more effort into it. \n\nIn 2012, I got an email from Sid, saying that he wanted to let me know that he started GitLab.com. I remember he said, “I hope you don’t mind.” The truth is, I was happy that he was interested in GitLab. I hoped that he would bring more users and potentially more contributors. I did not imagine we would end up joining together and making the project as popular as it has become. \n\nI came to work at GitLab full time in 2013. A team member recently asked me what my job title was when I started working at GitLab full time. The answer: it was Dmitriy. I didn’t have a formal title in the beginning. My first official title with the company was Chief Technology Officer. Quite a lot happened during that time. I wrote a lot of code, merged a lot of merge requests. I still have the highest number of commits in the main repository. I worked closely with the first front-end developer and the first UX designer, and we were building everything from scratch.\n\nBy October of 2018, the company grew to the size when the CTO couldn't write the code anymore. I transitioned into my role as Engineering Fellow and worked on several new features in the product. It was a time when building something was just as important as not breaking it. \n\nIt has been an amazing experience to be a part of GitLab’s evolution into [The DevOps Platform](/solutions/devops-platform/). From the simple source control software to the platform that helps you deliver better software faster. I am very glad to have been a part of GitLab’s growth and to be a part of building something that is so valuable to software development. \n\nIt was an honor to be in New York City last month to help Sid ring the opening bell at Nasdaq and see GitLab become a publicly traded company. 
I know what we do at GitLab had value for the rest of the world even before the company went public. However, experiencing GitLab’s listing day in person filled me with emotion, and there’s one thing I know for sure: the world cares about what we are doing here. \n\nI am so thankful to everyone who has contributed to GitLab. I especially want to thank Valery Sizov, Kamil Trzciński, Douwe Maan, Phil Hughes, Stan Hu, Rémy Coutable, Robert Speicher, and Sean McGivern, most of whom joined soon before or after Y Combinator and took our productivity to the next level. You were all just as passionate about the product as I was and it was amazing to work with you all. I also want to thank employee number 1 Marin Jankovski and employee number 2 Jacob Vosmaer, both of whom are still at GitLab Inc. after all these years. And of course, I want to thank Sid. Sid, your noble aspirations and your strong leadership made all of this possible. Thank you for your partnership over the years, it has been an honor to be on this journey with you as co-founders.\n\nThank you to the community and everyone who has believed in, supported, and contributed to GitLab’s journey. I believe we are still early in GitLab’s evolution, and I cannot wait to see what the team and the community do next.\n\nDmitriy  \n\nDmitriy Zaporozhets\n\nCo-founder, GitLab\n\n**Sid Sijbrandij**: Dmitriy, it has been an incredible 10 years since you started the project. Thank you for creating GitLab, making it open source, joining me on this journey and caring for GitLab for the last 10 years. It was an amazing experience to ring the opening bell with you at Nasdaq and I’m excited to continue to build upon what we’ve created thus far and lead GitLab through its next phase. \n\nWhile you are leaving your position as Engineering Fellow, I know that we will still keep in touch, and your legacy will live on through the company and the wider community. To honor your impact, GitLab Inc. 
has announced an annual “DZ Award” to recognize one team member who has made great impact by solving a difficult problem using a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions), because at GitLab, we celebrate the boring solution as a simple, fast, and effective way to maintain our speed of innovation. It will be an honor to recognize team members who embody your innovative spirit and deliver results that help us continue to grow.\n\nDmitriy, thank you for the incredible impact you’ve had on all of us. \n\nSid\n\nSid Sijbrandij\n\nCo-founder and CEO, GitLab\n",[9,267,767],"contributors",{"slug":769,"featured":6,"template":680},"a-special-farewell-from-gitlab-dmitriy-zaporozhets","content:en-us:blog:a-special-farewell-from-gitlab-dmitriy-zaporozhets.yml","A Special Farewell From Gitlab Dmitriy Zaporozhets","en-us/blog/a-special-farewell-from-gitlab-dmitriy-zaporozhets.yml","en-us/blog/a-special-farewell-from-gitlab-dmitriy-zaporozhets",{"_path":775,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":776,"content":782,"config":789,"_id":791,"_type":14,"title":792,"_source":16,"_file":793,"_stem":794,"_extension":19},"/en-us/blog/a-tale-of-two-editors",{"title":777,"description":778,"ogTitle":777,"ogDescription":778,"noIndex":6,"ogImage":779,"ogUrl":780,"ogSiteName":667,"ogType":668,"canonicalUrls":780,"schema":781},"A tale of two file editors","How UX Research revealed unexpected patterns in how people use two GitLab file editors: the single-file editor and the Web IDE.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668339/Blog/Hero%20Images/a-tale-of-two-editors.jpg","https://about.gitlab.com/blog/a-tale-of-two-editors","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A tale of two file editors\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2020-09-01\",\n      
}",{"title":777,"description":778,"authors":783,"heroImage":779,"date":785,"body":786,"category":787,"tags":788},[784],"Emily von Hoffmann","2020-09-01","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-11-13.\n{: .alert .alert-info .note}\n\nThis study began the way many do – with a conundrum and a theory. \n\nThe [Create: Editor](/handbook/product/categories/#editor-group) group originally had the goal of deprecating the single-file editor in favor of the new and improved [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/). But after looking into usage data, we discovered that [the single-file editor was being used at higher rates](https://www.youtube.com/watch?v=3iPNyfVNO1U&feature=youtu.be) than the Web IDE, and in fact had quadruple the page views. This was a concerning discovery with the potential of becoming a real inflection point, raising questions like: Do we have a discoverability problem, or do we have a usability problem? Has our investment in the Web IDE been moving us in the wrong direction? \n\n![Web IDE pageviews](https://about.gitlab.com/images/blogimages/web-ide-pageviews.png){: .medium.center} \n\n![Single-file editor pageviews](https://about.gitlab.com/images/blogimages/single-file-editor-pageviews.png){: .medium.center}\n\nAs you can see above, the single-file editor significantly outperforms the Web IDE in terms of page views. The single-file editor also receives more visitors who are coming to and from merge requests.  \n\n## Our theory\n\nWe initially thought the single-file editor got more usage than the Web IDE due to discoverability problems. Since people should be able to accomplish everything they need within the Web IDE, maybe they just don’t know it exists. Or, maybe the “edit” button on the single-file editor seems like the obvious choice for what people want to do, whereas “Web IDE” sounds more complicated. 
There’s also always a nagging concern that people have tried the Web IDE and found it to be a bad experience, opting instead to stick with the alternative. \n\n## Our research plan\n\nWe developed a survey to hammer out whether people know how to edit a file in GitLab, why they choose the editor they do, and why, if ever, they choose the other one. \nFor reference, here’s what they both look like: \n\n### Exhibit A: Single-file editor \n\n![Gif of Single-file editor in action](https://about.gitlab.com/images/blogimages/single-file-editor-ezgif.gif){: .shadow.center}\n\n### Exhibit B: Web IDE \n\n![Gif of Web IDE in action](https://about.gitlab.com/images/blogimages/web-ide-ezgif.gif){: .shadow.center} \n\n## Results\n\nAfter reviewing the survey results, we started seeing clear patterns that indicate people actually have distinct use cases for each editor. They’re not using the single-file editor because they can’t find the Web IDE; they’re purposefully selecting an editor based on the complexity of what they need to accomplish. \nPeople prefer the single-file editor when they need to make very simple changes to a single file. It’s aptly named in that sense! Alternatively, people choose the web IDE when they want to edit multiple files, or when they want to make changes that require context from other files. These changes might include hotfixes, creating templates, and making changes related to GitLab CI files.  \n\nWe also learned that people want the ability to edit in context. Today, people choose an editing mode and then switch between screens to navigate to other project areas, which isn’t the best experience. What people really want is the ability to toggle editors and easily access navigation while in the Web IDE:\n\n> \"Let me toggle between editors (e.g. 
if I start with Editor and realize I need to edit another file, I can switch to Web IDE – with any changes made carried over).\"\n\n## What’s next\n\nI love this study because it so clearly demonstrates the value of research. Had we gone ahead with our original theory, we would have been solving a perceived discoverability problem that people aren’t really having. Instead, we disproved our theory and have several proposed improvements already in the works. \nInstead of removing the single-file editor in favor of promoting the Web IDE, we’ll explore a simplified editing workflow that [consolidates the Edit and Web IDE buttons](https://gitlab.com/gitlab-org/gitlab/-/issues/221247) so that a dropdown allows people to choose their preferred mode. You can see mockups for the next potential iterations below.\n\n| Description | Mock |\n| ------ | ------ |\n| Web IDE option chosen (default) | ![Web IDE chosen by default](https://about.gitlab.com/images/blogimages/web-ide-chosen-by-default.png) |\n| Edit option chosen | ![Edit chosen by default](https://about.gitlab.com/images/blogimages/file-chosen-by-default.png) |\n| Dropdown | ![Dropdown lets you choose](https://about.gitlab.com/images/blogimages/dropdown-choose-your-editor.png) |\n\nWhat do you think about the proposed change? Come talk to us on [Twitter](https://twitter.com/gitlab/), and join our [UX research program](/community/gitlab-first-look/) to participate in future studies. 
\n\n## Read more about UX at GitLab\n\n- [How holistic UX design increased GitLab.com free trial signups](/blog/how-holistic-ux-design-increased-gitlab-free-trial-signups/)\n- [How we created a dark UI for GitLab's Web IDE](/blog/creating-a-dark-ui-for-gitlabs-web-ide/)\n- [Designing in an all-remote company](/blog/designing-in-an-all-remote-company/)\n\n[Katherine Okpara](/company/team/#katokpara) contributed to this post.\n\nCover image by Gastón Blaquier on [Unsplash](https://unsplash.com/photos/_foeAxTQ5H0).","insights",[700,9],{"slug":790,"featured":6,"template":680},"a-tale-of-two-editors","content:en-us:blog:a-tale-of-two-editors.yml","A Tale Of Two Editors","en-us/blog/a-tale-of-two-editors.yml","en-us/blog/a-tale-of-two-editors",{"_path":796,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":797,"content":803,"config":812,"_id":814,"_type":14,"title":815,"_source":16,"_file":816,"_stem":817,"_extension":19},"/en-us/blog/advice-for-women-seeking-careers-in-tech",{"title":798,"description":799,"ogTitle":798,"ogDescription":799,"noIndex":6,"ogImage":800,"ogUrl":801,"ogSiteName":667,"ogType":668,"canonicalUrls":801,"schema":802},"Use your uniqueness as a superpower and other advice for women seeking careers in tech","GitLab's Women's Team Member Resource Group shares tips on how to make a mark in this industry.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677856/Blog/Hero%20Images/collaboration.png","https://about.gitlab.com/blog/advice-for-women-seeking-careers-in-tech","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use your uniqueness as a superpower and other advice for women seeking careers in tech\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kyla Gradin Dahl\"}],\n        \"datePublished\": \"2022-04-04\",\n      }",{"title":798,"description":799,"authors":804,"heroImage":800,"date":806,"body":807,"category":808,"tags":809},[805],"Kyla 
Gradin Dahl","2022-04-04","\n\nGitLab's [Women's Team Member Resource Group (TMRG)](/company/culture/inclusion/tmrg-gitlab-women/), a forum for women to find their voice and be heard, celebrated Women's History Month by reflecting on what it means to be a woman in technology, how they arrived here, and who inspires them. We also gathered advice for other women who want to enter or advance in this industry.\n\n## Tips for women in tech\n\nAt GitLab, we work within our [CREDIT values](https://handbook.gitlab.com/handbook/values/#credit) every day. The organization’s [team member resource groups](/company/culture/inclusion/erg-guide/) amplify our value of [Diversity, Inclusion, and Belonging](https://handbook.gitlab.com/handbook/values/#diversity-inclusion). And, aligned with our value of [transparency](https://handbook.gitlab.com/handbook/values/#transparency), we’re able to share the voices of our TMRG here with our wider GitLab community.\n\nBelow are perspectives from:\n- [Jane Gianoutsos](https://gitlab.com/jgianoutsos), Manager, Support Engineering\n- [Michelle Hodges](https://gitlab.com/mwhodges), VP of Global Channels\n- [Taharah Nix](https://gitlab.com/tnix), Associate Paralegal, Employment\n- [Sherrod Patching](https://gitlab.com/spatching), Senior Director, Technical Account Manager\n- [Juliet Wanjohi](https://gitlab.com/jwanjohi), Senior Security Engineer, Security Automation\n\n**What advice would you give to women who are considering a career in technology?**\n\n**Michelle:** Unapologetically go for it. The industry requires diverse collaborators and contributors to make sure that the technology that runs the world, our schools, our homes, etc. is made by people with diverse backgrounds, perspectives, and life experiences. Just by showing up in technology, you make a meaningful impact on the world today and for the future. \n\n**Sherrod:** Honestly, I believe that anyone from any background can be successful and fulfilled in tech. 
Before pivoting into tech, I started my career as a musician. In tech, we are constantly creating, which is incredibly fulfilling as a creative person.\n\n**Taharah:** I would say that there's a place for everyone in tech. A lot of times people can be intimidated when they think of working for a tech company because they may not have the experience that they think they need. However, just as with any other company, there are a lot of different business needs within the company and all perspectives are necessary. So, I would say think about what you're most comfortable doing and expand from there. There are endless opportunities for learning.\n\n**Juliet:** The first step when considering moving into a career in the technology industry would be to come up with a strategy - explore the different pathways available and identify your area of interest. The next step would be to look at ways of leveling up your skills and knowledge by doing certifications, reading books, and listening to podcasts/audiobooks related to your area of interest. \n\nLeverage your network and community connections by reaching out and having [coffee chats](/company/culture/all-remote/informal-communication/#coffee-chats) with individuals who are in the tech field to get more insight and advice on how they got into the industry and tips that helped them along the way.\n\n**What tips do you have for women working towards being senior leaders?**\n\n**Michelle:** Leadership requires authenticity in self while being focused on the success of those you lead. Know where you want to go and build those experiences into your CV intentionally. Grit and resilience will serve you well - so build them into your wheelhouse.\n\n**Sherrod:** Lead by example, even if you are in an individual contributor role. Some of the best leaders I know led long before they had a team. 
Know where you are going, determine the milestones to getting there, and follow through on execution.\n\n**Juliet:** Taking the lead in shaping conversations about your career path with your manager is definitely important. You can do this by drawing up a roadmap or a plan of what you aspire to achieve, and where you'd like to be in the future, and being accountable by making a habit of evaluating your progress towards your goal of becoming a senior leader. Another essential tip would be to work towards increasing your sphere of influence and forming a network of professional relationships outside of your immediate team, as this opens a doorway for more collaboration opportunities with other teams and a chance to continually hone and fine-tune your leadership skills.\n\n**Did you have any women mentors (formal or informal) when you were building your career? What was some key advice they gave you and how important do you think mentorship is for future leaders?** \n\n**Michelle:** Your “otherness” is your superpower. You have a unique way of approaching problems, leading people, and showing up in a team setting - lean into that. Don’t let your otherness impact your authenticity. Not always but often, boys are raised to be brave while girls are raised to be perfect. Do not let your desire to be perfect stand in the way of taking risks, being brave, and being authentic. \n\n**Sherrod:** I have had mentors, but not women mentors. I can't advocate enough for having someone further along than you that can help you see things from angles you can't yet see from.\n\n**Juliet:** Yes, and I still do! One key [piece of] advice that I received at the start of my career from one of my mentors was to leap out of my comfort zone and go where the opportunities are. Waiting for your career to build itself rarely works, it is up to you to be committed and work towards getting those opportunities that you feel will uplift you and get you one step closer to your goal. 
\n\nMentorship is an integral piece for future leaders because it gives them an opportunity to shadow and seek advice from women who have had more experience with climbing the tech career ladder, and can help them map out their career path in accordance with their interests and goals. Having a mentor also gives them the chance to receive honest and constructive feedback on any challenges that they may face, and how they can potentially turn these challenges into growth opportunities!\n\n**What has been the proudest moment in your career so far and why?** \n\n**Michelle:** Seeing previous employees and mentees thrive in their careers.\n\n**Sherrod:** One moment that comes to mind is having led the acquisition of another company in my previous role before GitLab. I led the process and was considerably out of my comfort zone, which is when I learn the most. \n\n**Jane:** I’m proud of:\n- Earning not only the trust but the respect of a team member who was adamant I was the wrong person for the role when I was being appointed to it.\n- The card I received at a farewell saying I was the most effective manager the highly regarded engineer had worked for.\n- The unexpected recommendation and thanks written for me on LinkedIn by someone I had encouraged to notice his leadership skills and who went on to do just that.\n- The call I got from a third party after another person’s farewell from an ex-employer to tell me how much that departing person referred to my influence during their farewell speech.\n- The customer who insisted on coming to my farewell with flowers and champagne.\n- The peer I first worked with in 2005 who I still discuss career growth and life decisions with.\n\n**How important are GitLab’s values in building an inclusive culture for women at GitLab?** \n\n**Michelle:** Vitally important. In the workplace, whether it's GitLab or not, women have a responsibility to drive the change that creates not only an equal workplace but an equitable workplace. 
Equitable meaning working motherhood, caretaking, many women’s belief they need to be perfect, the imbalance of gender or URG representation, etc. - all these and more need to be accounted for to create a truly equitable work environment \n\nGitLab’s culture provides a space for women to lean into this responsibility, speak up, and make iterative and incremental changes that will impact future generations of women in the workplace and women leadership.\n\n**Sherrod:** Incredibly. I am a wife and a mom of two little girls first and foremost, and GitLab makes it possible to have a career and a career trajectory while also not sacrificing my family. \n\n**Jane:** GitLab has genuinely been life-changing for me. Through necessity, I’ve always been ok with being often the only woman in the tech team or even the company - or at least I thought I was ok with it!\n\nThen I started working here and discovered what it was to have space held for every voice, where I wasn’t reliant on allies to hold space or amplify my voice or sanity-check my suspicions about bad behaviors. Where [microaggressions](https://handbook.gitlab.com/handbook/values/#understanding-the-impact-of-microaggressions) are understood and challenged if they occur, where I don’t have to fight to advocate for the [uniqueness of people](https://handbook.gitlab.com/handbook/values/#quirkiness) but am empowered to support the fulness of all my team and colleagues, where we [normalize talking to each other](https://handbook.gitlab.com/handbook/values/#see-something-say-something) when we see old bad habits in play and where we do that with kindness.\n\nI’ve been moved to tears by people’s kindness, by the depth of inclusion I have come to experience here. I am often left pondering how very different things would have been for me had I experienced this in the early years of my career. 
I have no doubt mine would have been a very different journey, where I could have expended less energy on battling self-doubt and on healing, and more on growth and contribution.\n\n_The Women’s TMRG also invited [Christie Lenneville](/handbook/product/ux/one-pagers/christie-readme/), GitLab VP of UX, to share her experiences during a live speaker series, open to everyone at the company. You can watch the replay of the conversation below._\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/a10N6xYB7Ps\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n\n","culture",[9,810,811],"careers","collaboration",{"slug":813,"featured":6,"template":680},"advice-for-women-seeking-careers-in-tech","content:en-us:blog:advice-for-women-seeking-careers-in-tech.yml","Advice For Women Seeking Careers In Tech","en-us/blog/advice-for-women-seeking-careers-in-tech.yml","en-us/blog/advice-for-women-seeking-careers-in-tech",{"_path":819,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":820,"content":826,"config":833,"_id":835,"_type":14,"title":836,"_source":16,"_file":837,"_stem":838,"_extension":19},"/en-us/blog/agile-for-remote-work",{"title":821,"description":822,"ogTitle":821,"ogDescription":822,"noIndex":6,"ogImage":823,"ogUrl":824,"ogSiteName":667,"ogType":668,"canonicalUrls":824,"schema":825},"How async and all-remote make Agile simpler","Engineers at GitLab and IssueTrak share their tips on adopting Agile while working remotely.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681930/Blog/Hero%20Images/runlanes.jpg","https://about.gitlab.com/blog/agile-for-remote-work","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How async and all-remote make Agile simpler\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        
\"datePublished\": \"2021-03-02\"\n      }",{"title":821,"description":822,"authors":827,"heroImage":823,"date":828,"body":829,"category":808,"tags":830},[672],"2021-03-02","\n\nWhether you have the [Agile manifesto](https://agilemanifesto.org/) memorized or thought agility was a sport for dogs, there are a few core principles that engineers and non-engineering folks can adopt to improve communication, collaboration, and efficiency in their work – whether or not they’re working from the same office.\n\nInterestingly, the first piece of advice GitLab team members shared for engineers (or content developers) using Agile or working remotely is the same: Over-communicate!\n\n\"Provide maximum context in discussions and document the outcomes in the most appropriate location,\" says [Lindsay Kerr](/company/team/#lkerr), frontend engineering manager for Threat Management at GitLab. \"This allows other members of the team to benefit from synchronous conversations while giving stakeholders insight into the progress of the team.\"\n\n## How Agile keeps development lean\n\n[Agile software development](/topics/agile-delivery/) is all about developing solutions through collaboration and iteration, with some of the techniques being stand-ups, sprints, and more. Another key principle of Agile: Making processes more lean.\n\nDuring our annual user conference [GitLab Commit](https://www.youtube.com/watch?v=t8BvRMalbkM&list=PLFGfElNsQthYQaTiUPQcu4O0O20WHZksz&index=10), the software company [IssueTrak](https://www.issuetrak.com/) explained how migrating to GitLab helped the company embrace Agile software development. Before, IssueTrak was using five tools to manage their ticketing and repositories and power their [CI/CD pipelines](/features/continuous-integration/), at a substantial monthly cost. After IssueTrak migrated to GitLab, they reduced their monthly costs by 80% and simplified their toolchain by adopting GitLab for all their software development needs. 
You can [read more about their experience below](#how-two-teams-use-sprints-with-gitlab).\n\n### Why all-remote and Agile pair well together\n\nGitLab has embraced the principles of Agile software development in two key ways. The first way we've built agility and efficiency into our culture is by embracing all-remote, asynchronous work. By working remotely, team members can work when they want and in places and spaces that best suit them. Remote work has become more widely adopted since the COVID-19 pandemic has disrupted the traditional office, explains [Lauren Barker](/company/team/#laurenbarker), fullstack developer working on the Website.\n\nRemote work is a simple concept to grasp, but asynchronous (async) work is a bit more complicated. At GitLab, working async looks like optional meetings with detailed agendas and Slack channels are busy but without the pressure of an immediate reply. Zoom meetings are recorded and posted on the [GitLab Unfiltered channel](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A), which supports our commitment to transparency and breaks down silos by improving communication across teams.\n\n\"Working asynchronously enables an individual to contribute when they’re 'on',\" says Lauren. \"Sometimes you’re feeling super productive and motivated on a certain project at 2 AM, not during normal business hours such as 9-5.\"\n\nThe core of effective async, all-remote, and Agile workflows is documentation. By clearly defining project scope and needs in writing, processes are easy to follow and replicable for all users. At GitLab, perhaps the most important rule of all is our [handbook-first principle](/company/culture/all-remote/handbook-first-documentation/), which states that our handbook is the single source of truth in the organization and challenges team members to document everything. 
[Tyler Williams](/company/team/#tywilliams), website fullstack developer at GitLab, discussed the value he’s derived from the handbook-first mentality at a [recent Inbound Marketing team meeting](https://www.youtube.com/watch?v=qhsdwlqvuN4&list=PL05JrBw4t0KrurHzoPhov77x3_P26Ncz1) and said that handbook-first coupled with async work is what powers Agile for him.\n\n## Insights on remote team building with Agile\n\nTyler and Lindsay both acknowledge it can be challenging to build team camaraderie remotely when applying Agile principles like stand-ups when you're not in-person.\n\n\"It is easier to implement the human-connectivity pieces of an Agile mindset when you are in person,\" says Tyler. \"It is easier to implement the process-focused pieces of Agile techniques when you are all-remote and asynchronous.\"\n\n\"Distance can remove people from consequences,\" adds Brandon. \"A bad manager could drop a project on you, turn off remote messaging, and go on vacation. I've experienced this at previous workplaces.\"\n\nBut working alongside humans in the same space isn’t always an upside. In-person work can make personality clashes more commonplace, says Lindsay.\n\n### Remembering when Agile was analog\n\nBefore project management tools like Jira and GitLab, scrum teams had to plan sprints manually using things like post-its, index cards, and white boards. While this analog approach to sprint planning can be good for team-building, it was less efficient in the long-run.\n\n\"When I started working on scrum teams in 2008 we actually stood up together in a room during stand-up. We looked at post-it notes (tasks) associated with index cards (stories) when discussing the answers to our three questions (what did I do yesterday, what am I doing today, and what is blocking me),\" says Lindsay.\n\n\"We used an egg-timer to make sure our stand-up didn't go longer than 10 minutes each day. 
I drew our burndown on paper each day, across every two-week sprint, for the course of a three-month project. We looked each other in the eyes when we gave our answers, watched our teammates move the post-it note from 'to-do' to 'in progress', and celebrated together when a post-it moved to the 'done' column.\"\n\nIt is hard to document progress using the analog approach to sprint planning. When one team member is out sick or on vacation, they lose the historical context of a project as post-its move columns, and meetings happen without thorough notes or recordings.\n\n\"In an office setting, it may be easier to adopt the human-focused mindset, but it is much more challenging to adopt appropriate processes to keep Agile techniques running, and it is a much less enjoyable endeavor to coach people around process,\" says Tyler.\n\n### GitLab is designed for Agile\n\nThe other way we've embraced Agile principles at GitLab: [we've baked many Agile artifacts into different features of our DevOps Platform](/blog/gitlab-for-agile-software-development/) such as issues, labels, milestones, and weights, etc. 
\"These words seem somewhat abstract but they are all different ways to help you categorize and organize information to help you work agilely,\" explains [Brandon Lyon](/company/team/#brandon_lyon), frontend engineer for Marketing at GitLab.\n\nThese Agile features coupled with robust CI/CD help us keep GitLab lean and allow _our customers_ to continuously deliver software to _their customers_.\n\n\"If the main point of Agile is to continuously deliver working software as value to customers, GitLab enables teams to be Agile because it has the best CI/CD tools I've ever used, and they're integrated directly in my day-to-day task management workflow,\" says Tyler.\n\n## How two teams use sprints with GitLab\n\nIn their GitLab Commit presentation, IssueTrak team members Lisa Cockrell, director of development, and Jordan Upperman, fullstack developer, said that they created two custom issue boards using GitLab, one of which is the \"Ready for Sprint\" column and board. Sprint planning meetings are much shorter now because the team can just look at the \"Ready for Sprint\" board to identify which issues are ready to enter the development process.\n\n\"Our use of these two Kanban boards allows us to pivot with ease when necessary. As bugs are found during testing it's easy for us to quickly weigh the new ticket, remove an item with equal weight, and send it back to the top of the 'Ready for Sprint' column,\" says Lisa. She explains that this process prevents scope creep and helps stakeholders remember that when work is added to the sprint, something else must come out. 
Watch the entire presentation to learn more about how IssueTrak uses GitLab tools for Agile development:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/t8BvRMalbkM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nBrandon, Tyler, and Lauren all work on the [Digital Experience team at GitLab](/handbook/marketing/digital-experience/), which is responsible for our marketing website. In the spirit of iteration and efficiency (two of our values at GitLab), the team is in the process of updating the way they conduct sprints. Tyler [opened an MR to facilitate the discussion](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/74534) – share your tips for sprints by commenting on the MR or suggesting a change.\n\nWe are constantly looking for new strategies to communicate and engineer with clarity and efficiency. If you have any suggestions for how to better embrace Agile, async, and all-remote work, let us know in the comments or tweet at us @GitLab. If you are still new to this topic, our advice is to try and go with the flow, and leave your expectations at the door.\n\n\"If you keep in mind that Agile is a flexible, human-focused approach to knowledge work and delivering value to customers, the rest will fall in place,\" says Tyler. 
\"Take strong opinions with a grain of salt, and give yourself room to make mistakes and remember that [it's impossible to know everything](https://handbook.gitlab.com/handbook/values/#its-impossible-to-know-everything).\"\n",[831,832,9],"agile","remote work",{"slug":834,"featured":6,"template":680},"agile-for-remote-work","content:en-us:blog:agile-for-remote-work.yml","Agile For Remote Work","en-us/blog/agile-for-remote-work.yml","en-us/blog/agile-for-remote-work",{"_path":840,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":841,"content":847,"config":853,"_id":855,"_type":14,"title":856,"_source":16,"_file":857,"_stem":858,"_extension":19},"/en-us/blog/agile-iteration-unique-onboarding-experience",{"title":842,"description":843,"ogTitle":842,"ogDescription":843,"noIndex":6,"ogImage":844,"ogUrl":845,"ogSiteName":667,"ogType":668,"canonicalUrls":845,"schema":846},"Agile iteration: My unique onboarding experience at GitLab","How I learned to iterate quickly during my first week at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/agile-iteration-unique-onboarding-experience","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Agile iteration: My unique onboarding experience at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Fahey\"}],\n        \"datePublished\": \"2019-04-26\"\n      }",{"title":842,"description":843,"authors":848,"heroImage":844,"date":850,"body":851,"category":720,"tags":852},[849],"Michael Fahey","2019-04-26","\n\nMy name is Michael Fahey. I have been working in the security and IT industries for over 15 years. Recently, I joined GitLab’s Security Team as the manager of the [Red Team](/handbook/security/#red-team). 
The GitLab Red Team is responsible for assessing the overall security posture of GitLab as a company as well as testing the security and defensive capabilities of our products and services.\n\nWe demonstrate that by telling the stories of our exploits, to help provide context and flavor to the risks we identify. We are white-hat hackers emulating adversaries, and bad guys, so we can rapidly iterate on our security practices resulting in a stronger security posture and better security products.\n\nThe Red Team is a new team. Initially, when I talked to my manager, I was expecting to plan and conduct Red Team exercises after I onboarded. An opportunity presented itself for me to join the [CEO Shadow Program](/handbook/ceo/shadow/), so instead, in my second week, I was in San Francisco working with the CEO of GitLab, [Sid Sijbrandij](/company/team/#sytses)! One thing to know about Sid is that he is passionate about security, so while I was a part of the CEO Shadow Program, he recommended we perform a social engineering exercise against GitLab. I was starting to understand how serious GitLab is about our values, and I wanted to get the firsthand experience with one of our values, [iteration](https://handbook.gitlab.com/handbook/values/#iteration).\n\nThe tempo at which everything was going was not something I was used to. When faced with a new situation like this, I try to emphasize care by slowly gathering information on the target, then building a believable story to persuade the target to perform what I want them to. Social engineering exercises are more about building trust and sympathy than anything else. Sid, however, insisted that I just execute and iterate on the exercise, despite my reservations. Sid was trying to teach me something important which I did not yet grasp.\n\n## What are our Red Team exercise goals?\n\nThe goal of the exercise was to observe how a new employee would react to the demands of the CEO. 
From the perspective of an adversary, the goal was to compromise GitLab.com by impersonating the CEO, and then demand that an employee with privileged access install an authorization key, controlled by the Red Team, to production servers.\n\nThe expected value of this basic exercise was to identify areas of improvement and level set on our current security stance. It's a starting point to allow us to iterate and build upon. Ideally, we hoped our chosen target would report the incident to the Security Operations Team. At that point, the event would be triaged, and the account deactivated quickly to mitigate any further impact.\n\n### Here is how we scoped this basic exercise:\n\n- Limit the activity to Slack.\n- Emulate an immature, aggressive adversary.\n- Target and identify the people who administer our production systems.\n- Assume compromise of the CEO's Slack identity. For the objectives of this exercise, we don't care how Sid's identity got compromised. In fact, the impersonated Slack account was provisioned before this exercise.\n- The Security Operations Team were not aware of the engagement and were not notified prior to this exercise.\n\n### How did the attempted compromise go?\n\nSo, as the adversary, we started out with the pre-provisioned CEO slack account and logged in. Next, we needed to learn more about GitLab and find the weakest link in the chain to exploit. Luckily, GitLab makes all the information we need publicly available\nwithin the [handbook](/handbook/) and [team](/company/team/) pages.\n\n### Here is what we learned:\n\n- The Infrastructure Team administers all of GitLab’s production systems.\n- The Infrastructure Team remotely accesses, controls and manages GitLab.com.\n- I identified a new GitLab team-member who had just joined the Infrastructure Team. 
His Slack profile really stood out for us:\n\n![New GitLab team-member Slack profile](https://about.gitlab.com/images/blogimages/red-team-exercise/slack-profile.png){: .shadow.medium.center}\n\nWe found the status of “Onboarding – be gentle” too good not to take up. So, we sent out an urgent request impersonating the CEO of GitLab. “Sid” had an urgent request to add his SSH key to the production systems and Target0 was the only one that could help. Check out what the Red Team sent him below.\n\n![Message from \"Sid\"](https://about.gitlab.com/images/blogimages/red-team-exercise/sid-message.png){: .shadow.medium.center}\n\nFor context alone, there is one crucial fact to understand. An Advanced Persistent Threat (APT) would not burn such a high-value asset as Sid’s Slack profile on something so aggressive. It has too high of a chance for failure. That isn’t to say this doesn’t happen. It is generally a more immature adversary who just wants to do a smash and grab of whatever they can get.\n\nWith the message sent, Target0 never responded. We didn’t have any insight as to what was happening, and we didn’t want to push too hard, so we took a different tactic. We contacted his manager, Target1 to see if we could pressure Target0 through another trusted means.\n\n![Message from \"Sid\" to manager](https://about.gitlab.com/images/blogimages/red-team-exercise/sid-message-manager.png){: .shadow.medium.center}\n\nLooks like we are onto something here! Target1 is going to look into it for us, but we hear nothing back. At this point in the exercise, we were still not sure what was happening in the background and waited over an hour. Our access was still intact, so we weren’t sure if we were caught or they were working on implementing the request.\n\n## What actually happened?\n\nTurns out Target0 challenged the request and reached out for help from our Security Operations team. 
We failed to compromise GitLab.com, but there could be more to learn in how Security Operations responded to the event. One can see that Target0 created the following ticket below. At that point, our Security Operations team was on it!\n\n![Ticket to SecOps](https://about.gitlab.com/images/blogimages/red-team-exercise/sec-ops-ticket.png){: .shadow.medium.center}\n\nThe Security Operations Team immediately triaged the issue. They got in touch with Sid’s executive assistant. She asked the Security Operations team to hold off on any action then went unresponsive for a half hour, because she knew about this exercise, and was advised to take the actions that she took. This stalled the response process. During that time, the Red Team still had control over Sid’s Slack account, which had not been deactivated.\n\n## What were the results of the exercise?\n\nFrom a Red Team perspective, we _wanted_ to fail in our exercises, but fail or succeed, it is critical that everyone involved learns from the experience.\n\n### Here are some key observations:\n\n- Target0 and Target1's instincts and decisions were validated. They did the right thing to challenge and report the request from “Sid.” They are now more empowered to challenge dubious claims in the future. Heroes of the story!\n\n- The Security Operations Team quickly responded to and triaged the incident. However, through a combination of the following, a final response was delayed:\n    - Sid’s executive assistant requested to delay action until she heard back from Sid.\n    - There was a lack of evidence indicating unauthorized access (via investigation of Slack’s audit logs).\n    - Positive confirmation from the executive assistant that Sid was in an interview (thus no physical breach). 
A Security Operations team member later jested:\n\n![SecOps team member joke](https://about.gitlab.com/images/blogimages/red-team-exercise/slack-comment.png){: .shadow.medium.center}\n\n- Communication is critical when running Red Team exercises, and a failure in communication can lead to failures in efficiency. For example:\n    - When the Red Team exercise is starting, send a notification to leadership that the activity is beginning, so that leaders can better respond to the natural panic of these engagements.\n    - Perform a Zoom review meeting with the Sr. Director of Security, VP of Engineering, and the CEO to make sure everyone is on the same page.\n\n## How did this social experiment play out?\n\nGitLab is a growing startup with lots of new employees onboarding and an evolving security organization. GitLab demonstrated their ability to be agile and security-aware, but we’ve now started a conversation on why people shouldn’t blindly follow orders due to the person's position and authority, like the CEO. That is precisely why controls like Separation of Duties [(NIST 800-53 Security Control: AC-5)](https://nvd.nist.gov/800-53/Rev4/control/AC-5) and the incident response process are so critical.\n\nThis exercise allowed both the Red Team and Security Operations Team the opportunity to learn and grow together. Red Team is the robbers and Security Operations the cops, but what can happen if the robbers and cops start working together? If one of my favorite shows, \"White Collar,\" is any indicator, we can achieve far more together than we could alone.\n\n## What did I learn from all of this?\n\nFrom my perspective, I expected Target0 and Target1 to report the issue and Security Operations to respond to the incident. The Red Team’s goal should be about empowering people to champion cybersecurity challenges and solutions. We may do that through adversarial means to highlight problems, but it should always be for the benefit of the employees, customer, and company. 
I feel like some of us in the industry forget that from time to time.\n\nOutside of the exercise, I learned the importance of [iteration](https://handbook.gitlab.com/handbook/values/#iteration)\nand a strategic concept GitLab employs called [Breadth over Depth](/company/strategy/#breadth-over-depth).\nThe idea is to iterate as fast as possible to learn and grow as fast as possible. Quickly learn and grow as opposed to planning something over days and weeks.\n\nIf you quickly iterate then fail or succeed, you can learn far more than if you carefully planned\nevery step then execute on that plan. There is no guarantee that any plan or\nidea will succeed, no matter how much planning and thought you put into it. There is truth in\nthe saying, “No plan survives first contact with the enemy.”\n\nWe can’t wait for that perfect moment or take the time to develop the perfect plan because we will become stagnant and learn little otherwise. What should you do then? Rapidly iterate. Over time, you will grow far faster, be more capable, and have greater insight into your solution.\n",[9,720],{"slug":854,"featured":6,"template":680},"agile-iteration-unique-onboarding-experience","content:en-us:blog:agile-iteration-unique-onboarding-experience.yml","Agile Iteration Unique Onboarding Experience","en-us/blog/agile-iteration-unique-onboarding-experience.yml","en-us/blog/agile-iteration-unique-onboarding-experience",{"_path":860,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":861,"content":867,"config":874,"_id":876,"_type":14,"title":877,"_source":16,"_file":878,"_stem":879,"_extension":19},"/en-us/blog/all-remote-fundraising",{"title":862,"description":863,"ogTitle":862,"ogDescription":863,"noIndex":6,"ogImage":864,"ogUrl":865,"ogSiteName":667,"ogType":668,"canonicalUrls":865,"schema":866},"How to raise funds as an all-remote startup","GitLab CEO Sid Sijbrandij and podcast host Maren Kate unpack why venture firms struggle to fund all-remote 
startups.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673152/Blog/Hero%20Images/remotefundraisinghurdle.jpg","https://about.gitlab.com/blog/all-remote-fundraising","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to raise funds as an all-remote startup\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-08-16\"\n      }",{"title":862,"description":863,"authors":868,"heroImage":864,"date":870,"body":871,"category":787,"tags":872},[869],"Valerie Silverthorne","2019-08-16","\nIt’s possible to be an all-remote startup and get venture capital – GitLab is proof of that – but that doesn’t mean it’s easy. GitLab CEO [Sid Sijbrandij](/company/team/#sytses) spoke with [Maren Kate](https://www.linkedin.com/in/marenkate), host of the From 5 to 50 podcast, about why venture capitalists don’t love all-remote companies and how to work around that challenge. The [Remote AF](https://podcasts.apple.com/us/podcast/gitlab-raised-$100m-got-valued-at-over-billion-by-starting/id1467214647?i=1000444691471) podcast is aimed at startups looking to scale their companies from 5 to 50 employees and beyond.\n\nMaren starts by asking Sid how the concept of all-remote was received by the venture capital community. Sid’s response: “They don’t like remote. We missed out on investors because we are remote. We have skepticism from investors because we are remote.”\n\n## Stages of fundraising\n\nFundraising changes as a company grows, and it gets easier with time, Sid explains. “In the beginning they assess your team, then they assess your product, and then they assess your financials.” That’s why it can be hard for a newly-minted, all-remote startup to get fundraising traction in the early stages, he says. “When it comes to the team, they’re super skeptical they will be able to create something with all-remote. 
Then when it’s about the product they say, ‘Yes, maybe, but what about scaling?’ And then when it’s about the financials you can let the numbers speak for themselves so it’s less of a concern.”\n\nAnd in the early days of GitLab, even Sid was skeptical enough about all-remote to open an office. That office made our [series A financing](/blog/gitlab-announces-4m-series-a-funding-from-khosla-ventures/) easier, he says. But Sid soon realized that people weren’t coming into the office (San Francisco Bay Area traffic being what it is) so committed to an [all-remote philosophy](/company/culture/all-remote/). That decision made [series B fundraising efforts](/blog/gitlab-master-plan/) difficult. Some investors just said no to GitLab, but a few at least asked for an explanation. Even after an explanation, many remained dubious and in the end it took an enthusiastic VC who’d actually stayed up late reading through the handbook and vouched for GitLab to seal the deal.\n\n>Some of the best ideas are the least plausible.\n\nAll-remote companies are getting a toehold today, Sid offers, pointing to [InVision](/blog/pyb-all-remote-mark-frein/), WordPress, and Zapier. But there are still some factors that can inhibit fundraising options. “If we were to be acquired there’s probably a 50% discount, because for the acquiring company it’s so hard to bring people over to their headquarters,” Sid says. “Since an acquisition is the most likely outcome (for most startups), if you raise venture capital that depresses the evaluation you will get.”\n\n## Has co-location hit the wall?\n\nOn the upside, though, Sid thinks the limits of co-location are being made very clear. “Investors in San Francisco are all battling it out. They’re saying ‘Our portfolio companies are getting outbid by Google, by eBay, by Airbnb for engineering talent.’ Retention is an enormous problem at these companies. 
So they don’t like remote yet, but they’re starting to sour on the co-located model and all the disadvantages.”\n\nAnd while the all-remote path might be tough, Sid continues to stress the benefits to startups. “Remote offers you much easier hiring and scaling. Remote forces you to do the things you should be doing anyway, but you do them sooner.”\n\nAt the end of the day, Maren wonders whether some unconscious bias is at play. “If you see someone in an office, it makes them more successful,” she says, “but it’s not really that, it’s just human instinct.”\n\nSid agrees, and then adds perhaps the strongest argument in favor of all-remote – co-location can have a very dampening effect on innovation. “There's a lot of detrimental things that happen because some of the best ideas are the least plausible, like run an illegal taxi service, have people rent out their own home to strangers, or start competing with GitHub. And if you co-locate people, they're going to have to tell everyone what they do. And when they see people frowning, they're going to switch to something more plausible. 
And that's what you want to prevent.”\n\nListen to the whole conversation [here](https://podcasts.apple.com/us/podcast/gitlab-raised-$100m-got-valued-at-over-billion-by-starting/id1467214647?i=1000444691471).\n\nCover image by [Pau Casals](https://unsplash.com/@paucasals) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,832,873],"startups",{"slug":875,"featured":6,"template":680},"all-remote-fundraising","content:en-us:blog:all-remote-fundraising.yml","All Remote Fundraising","en-us/blog/all-remote-fundraising.yml","en-us/blog/all-remote-fundraising",{"_path":881,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":882,"content":888,"config":894,"_id":896,"_type":14,"title":897,"_source":16,"_file":898,"_stem":899,"_extension":19},"/en-us/blog/all-remote-is-for-everyone",{"title":883,"description":884,"ogTitle":883,"ogDescription":884,"noIndex":6,"ogImage":885,"ogUrl":886,"ogSiteName":667,"ogType":668,"canonicalUrls":886,"schema":887},"Why we believe all-remote is for everyone","Darren Murph, leading GitLab's all-remote initiatives, shares why the future of work can be embraced today.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680729/Blog/Hero%20Images/dm-globe.jpg","https://about.gitlab.com/blog/all-remote-is-for-everyone","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we believe all-remote is for everyone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Murph\"}],\n        \"datePublished\": \"2019-08-15\",\n      }",{"title":883,"description":884,"authors":889,"heroImage":885,"date":891,"body":892,"category":808,"tags":893},[890],"Darren Murph","2019-08-15","\n\nWe're committed to [all-remote](/company/culture/all-remote/) work at GitLab – our whole work philosophy\nis designed around it. 
I joined GitLab to lead our all-remote\ninitiatives – here's a bit about my background and why I'm excited to join the team.\n\n### A pivotal moment in how society looks at remote work\n\nGitLab is known by many as an [open core company](/blog/monetizing-and-being-open-source/) which develops software for the software\ndevelopment lifecycle. What I want the world to know is that it’s *also* a pioneer in remote work,\nbuilding a transparent, empowered workforce of [over 800 team members across 57+ countries](/company/team/).\nYou read that correctly. Over 800 of us, none of whom are required to work from a central\noffice, are making GitLab one of the world’s largest all remote companies.\n\nI was recently given the honor of joining GitLab to lead its all remote initiatives. The\ncompany’s remarkable growth and impact on the world is well documented, but as I’ve\nengaged with team members – as well as pets and families in the background! – I’m beginning to\nunderstand that we’ve barely scratched the surface of what’s possible.\n\n![Embracing the remote lifestyle in Alabama Hills, California](https://about.gitlab.com/images/blogimages/dm-alabama-hills-california.jpg){: .shadow.medium.center}\nEmbracing the remote lifestyle in Alabama Hills, California\n{: .note.text-center}\n\nI believe we’re nearing a sea change in how we work. It’s easy to point to stratospheric rent prices in major urban centers, soul-crushing gridlock, and shifting mindsets in what society values in a career as reasons for turning to remote work.\n\nBut I think it’s deeper than that. We yearn to explore, and work doesn't have to stand in the way.\n\n### Positive change is possible as all-remote becomes the default for many companies\n\nThe internet has never been faster nor more ubiquitous. Computing power has never been more\naccessible. 
It’s time for organizations large and small to embrace these realities, to open their\nrecruiting pipelines to the world, to divert real estate budget to R&D and to recognize that\nwe’re all better workers when we’re given the space to feel truly alive.\n\nMore importantly, the communities that matter to each of us have never needed our presence more.\nWorking remotely gives each person the autonomy to serve in a place that matters to them –\na place that has shaped them – contributing significantly to the wellbeing of a population\nthat may be at risk of losing its foundation, should talent continue to flee to the usual job centers.\n\n[Research from the University of New Hampshire](https://carsey.unh.edu/publication/rural-depopulation) has found that \"35% of rural counties in the United States are experiencing protracted and significant population loss.\" Speaking to shrinking towns across Europe, [a 2016 report from the European Parliamentary Research Service](http://www.europarl.europa.eu/thinktank/en/document.html?reference=EPRS_BRI(2016)586632) notes that \"younger members of society prefer to migrate to more economically vibrant regions and cities in search of better job prospects as, in most of these territories, professional opportunities remain limited and confined to specific fields (e.g. agriculture and tourism).\" I believe all-remote has the power to pause, and perhaps even reverse, these trends of depopulation.\n\n![Remote work can have outsized positive impact in cities like Accra](https://about.gitlab.com/images/blogimages/dm-accra-ghana.jpg){: .shadow.medium.center}\nRemote work can have outsized positive impact in cities like Accra\n{: .note.text-center}\n\nWhat would traffic in London, Moscow, Mexico City, and Rome look like if every person who *could* work remotely, did?\nWhat talent might we surface by tapping into burgeoning tech hubs in cities like Accra or Lagos? 
How\nmany San Franciscans – locals who have been priced out of their own city – could move back if some\nof the world’s greatest technical minds were empowered to work from anywhere? What would\nunderserved communities in rural regions across the globe be capable of if well-paying jobs\ncame their way via the internet?\n\nI don’t ask these questions hypothetically. I ask them because I want to leverage GitLab’s\nplatform to change the narrative on work, and I fully expect that we’ll see those answers in\nmy lifetime. It doesn’t hurt that GitLab (the product) is [tailor-made to enable remote work](/free-trial/),\neven across large teams.\n\n### Creating more remote opportunities for others\n\nI’ve had the great privilege of working remotely my entire career. I’ve shared memories with my\nfamily in over 50 countries and have celebrated milestones with colleagues whilst flying over a\nmillion miles on a single airline (thank you, Delta!).\n\nMy wife and I experienced the beautiful and transformative journey of adoption because I\nworked for an employer that trusted me to excel from a place I needed to be to see it through.\nI’ve met countless GitLabbers who have never been happier, more fulfilled, or more engaged with\ntheir family and community, all because they’re empowered to work remotely.\n\n![Remote work encourages exploration of both locales and cultures](https://about.gitlab.com/images/blogimages/dm-delta-munich-germany.jpg){: .shadow.medium.center}\nRemote work encourages exploration of both locales and cultures\n{: .note.text-center}\n\nI share this because I realize I’m one of the fortunate few, and I long for countless others to have\nthese same opportunities. 
GitLab is positioned to be of service to everyone – from startups, to\nentrepreneurs, to the world’s largest enterprises – in creating a remote infrastructure that works.\nI couldn’t be more excited to help enable precisely that.\n\nIf you're new to the concept of all-remote, I'd encourage you to dive into\nour [Handbook](/handbook/)\nand [learn how it works at GitLab](/company/culture/all-remote/tips/).\n\nIf you're ready to embrace the freedoms enabled by all-remote, browse\nour [vacancies](/jobs/) and join me on this journey!\n",[677,9,832,810],{"slug":895,"featured":6,"template":680},"all-remote-is-for-everyone","content:en-us:blog:all-remote-is-for-everyone.yml","All Remote Is For Everyone","en-us/blog/all-remote-is-for-everyone.yml","en-us/blog/all-remote-is-for-everyone",{"_path":901,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":902,"content":908,"config":914,"_id":916,"_type":14,"title":917,"_source":16,"_file":918,"_stem":919,"_extension":19},"/en-us/blog/american-fuzzy-lop-on-gitlab",{"title":903,"description":904,"ogTitle":903,"ogDescription":904,"noIndex":6,"ogImage":905,"ogUrl":906,"ogSiteName":667,"ogType":668,"canonicalUrls":906,"schema":907},"American Fuzzy Lop on GitLab: Automating instrumented fuzzing using pipelines","An example of how to automate instrumented fuzzing with American Fuzzy Lop using pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680723/Blog/Hero%20Images/aerial-shot-birds-eye-view.jpg","https://about.gitlab.com/blog/american-fuzzy-lop-on-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"American Fuzzy Lop on GitLab: Automating instrumented fuzzing using pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Luka Trbojevic\"}],\n        \"datePublished\": \"2019-08-14\",\n      
}",{"title":903,"description":904,"authors":909,"heroImage":905,"date":911,"body":912,"category":720,"tags":913},[910],"Luka Trbojevic","2019-08-14","\n\nThis year at [BSides Kansas City](https://2019.bsideskc.org/), many of my conversations were about fuzzing and integrating security into the [DevOps process](/topics/devops/). Fuzzing has been around for a very long time. Back in 2006, I wrote my first (very simple) fuzzer to mutate .zip files and pass them to anti-virus programs; even at that time, the case for fuzzing had been made many years prior. Today, [American Fuzzy Lop (AFL)](http://lcamtuf.coredump.cx/afl/), written by Michal Zalewski, stands as one of the best fuzzers available, and is one of my favorite tools.\n\nAnecdotally, I've been seeing good arguments made for the adoption of fuzzing as part of the software security lifecycle more frequently than ever before. At BSides Kansas City, I listened to an interesting conversation at the speakers' dinner where the case was made that fuzzing is reduced to a niche, nice-to-have, wishlist item incorporated into workflows by exploit developers and only the largest of enterprises. I largely agree, but I like to think of why that's the case.\n\nMy general sense is that instrumented fuzzing, as a function of the software lifecycle, is still fraught with too much friction for widespread adoption. For something to take hold at scale, be useful to a large number of people, and have its benefits passed down to consumers, it needs to be quick and simple. Right now, fuzzing as something an **organization** does as part of its standard practice is not quick or simple. So, even if you have someone well-versed in fuzzing and exploit development, chances are fuzzing won’t survive as an organizational function.\n\nIn the hope that we can move the conversation forward, I wanted to give back something actionable – yet simple – to help folks incorporate fuzzing into their workflows. 
I’ve always found practical, hands-on examples to be the most helpful, so I put together a baseline sample showing how fuzzing with AFL can be automated as part of a pipeline.\n\nTwo important notes:\n\n* This blog isn’t an introduction to instrumented fuzzing and assumes you have a good understanding of it already, including what a test harness is and how to write one.\n* [Fuzzing as a feature of GitLab is in the works](https://gitlab.com/gitlab-org/gitlab-ee/issues/10852), but it's not here just yet. Also, the [first iteration](https://gitlab.com/gitlab-org/gitlab-ee/issues/8453) doesn't seem to include instrumented fuzzing.\n\nYou can find all code, including the Dockerfile, and detailed setup/modification instructions in the [AFL-GitLab repository](https://gitlab.com/ltrbojevic/afl-gitlab). **Please familiarize yourself with the background in the repository first!**\n\n## Docker image setup\n\nI prefer to work with Docker images, so I’ve used the [Docker executor](https://docs.gitlab.com/runner/executors/docker.html). AFL, your code, the test harness (if applicable), and the controlling Python script (more on that below) are in your Docker image.\n\nFor this example, we’re using Ubuntu 16.04. You can use any operating system you prefer. I run Ubuntu 16.04 for my fuzzing jobs, which is why I’ve used it here:\n\n`FROM ubuntu:16.04`\n\nI copy a local `fuzzing/` folder with all of my files to `/usr/src/` in the Docker image. This can be changed to whatever works for you:\n\n`COPY fuzzing/ /usr/src/`\n\nI set the user as `root` because I just want it to work. 
Customize this per your operating system, threat model, and risk tolerance:\n\n`USER root`\n\nThen just install whatever packages you need:\n\n```\nRUN apt-get update && apt-get install -y \\\n  sudo \\\n  software-properties-common \\\n  build-essential\n```\n\nNote that this image is optimized for compatibility and efficiency only.\n\n## Our sample target program\n\nFor this example, we’re going to be fuzzing [vulnerable.c](https://gitlab.com/ltrbojevic/afl-gitlab/blob/master/fuzzing/afl-gitlab/vulnerable.c). It features a total absence of security and C best practice and is designed to intentionally generate at least one unique crash within the first few seconds of fuzzing. It’s a quick and simple way to verify everything else is working.\n\n## Setting up AFL\n\nInstrumenting your program is done within the `.gitlab-ci.yml` file as part of the `before_script` parameter:\n\n```\nrun-afl:\nstage: run-afl\nbefore_script:\n- cd /usr/src/afl-2.52b\n- make\n- make install\n- cd /usr/src/afl-gitlab\n- CC=/usr/src/afl-2.52b/afl-gcc AFL_HARDEN=1 make\n- echo core >/proc/sys/kernel/core_pattern\n- echo $CI_PROJECT_DIR\n```\n\nI include `echo $CI_PROJECT_DIR` as a troubleshooting measure (more below).\n\n### A note on performance\n\nThis specific example uses GitLab.com [Shared Runners](https://docs.gitlab.com/ee/ci/runners/#shared-runners) for demonstration’s sake. The performance limitations of Shared Runners in the context of fuzzing make it infeasible to run instrumented fuzzing jobs in a performant way. Instead, you could consider using a self-hosted runner.\n\n## Initializing AFL\n\nThe problem with initiating AFL in `.gitlab-ci.yml` is AFL will continue to run until interrupted, so it must be stopped programmatically and that stop must be configurable to only run after a defined amount of time.\n\nTo solve this problem, we can have the pipeline run a script to manage the execution and handling of AFL. I prefer to use Python, but you can use any language you like. 
In Python, we can use the `time.sleep()` function. The Python script will initiate AFL and `sleep()` will be used to run AFL for whatever length of time you set. Afterwards, AFL will be stopped.\n\nIn `.gitlab.ci-yml`, we run this script **after** AFL is installed and our program is instrumented. We do this by doing the instrumentation using the `before_script` parameter and running the Python script using the `script` parameter:\n\n```\nscript:\n- python3 /usr/src/fuzzing/afl-gitlab/afl-gitlab.py\n```\n\n`afl-gitlab.py` is where the magic happens and it serves as the control center where the handling and synchronization of the different events that need to happen are managed and customized.\n\nTo start AFL, we use `subprocess.Popen()` to run the command:\n\n```subprocess.Popen([\"afl-fuzz\", \"-i\", \"inputs\", \"-o\", \"outputs\", \"./vulnerable\"])```\n\nTo control exactly how long AFL will run, we use `time.sleep()` -- in this example, it’s 30 seconds:\n\n```\ntime.sleep(30)\n```\n\nAfter that, we need to stop AFL in a way that doesn’t cause the pipeline to fail. If you exit `afl-python.py` itself like you might do locally, the job will fail. To get around this, we use `subprocess.Popen()` to stop AFL:\n\n```\nsubprocess.Popen([\"pkill\", \"-f\", \"afl\"])\n```\n\nLastly, we exit the `afl-python.py` in a way that doesn’t cause the job to fail:\n\n```\nos._exit(0)\n```\n\nIt’s important your script doesn’t cause the job to fail because everything else that needs to happen, won’t:\n\n![job fail message](https://about.gitlab.com/images/blogimages/jobfail.png){: .shadow.medium.center}\n\nJust in case the job fails and you don’t want your entire pipeline to fail, I set `allow_failure: true` in the `.gitlab-ci.yml` file.\n\n## Collecting AFL output as a pipeline artifact\n\nRunning AFL has no value if the output can’t be collected in a workflow-friendly way, so we’ll use [pipeline artifacts](https://docs.gitlab.com/ee/ci/pipelines/job_artifacts.html). 
The entire artifact collection process can be defined in the `.gitlab-ci-yml` file.\n\nFirst, using the `after_scripts` parameter, which will run after `afl-gitlab.py` exits, we copy the `outputs` folder to a location in `$CI_PROJECT_DIR` (thus the `echo $CI_PROJECT_DIR done earlier`). This is important because you may run into [artifact not found issues](https://stackoverflow.com/questions/47490688/gitlab-ci-artifacts-not-found):\n\n```\nafter_script:\n  - cp -a /usr/src/afl-gitlab/outputs $CI_PROJECT_DIR\n```\nThen we simply collect the `outputs` folder as an artifact:\n\n```\nartifacts:\npaths:\n- $CI_PROJECT_DIR/outputs\n```\n\nYour output will then be viewable like any other pipeline artifact:\n\n![output artifact](https://about.gitlab.com/images/blogimages/output-artifact.png){: .shadow.medium.center}\n\n## Creating issues for every unique crash\n\nTo make this a truly automated workflow, you could use the [GitLab API](https://docs.gitlab.com/ee/api/) to create an issue for every unique crash. At this time, I haven’t had the time to invest heavily in this, but I’ll have to circle back when I do.\n\nHaving played with the artifacts API for only a few brief moments, the path of least resistance seems to be adding the logic to `afl-gitlab.py` **prior** to the artifact collection.\n\nFor a specific example on how to use `python-gitlab` to create issues, check out [an issue generator script I wrote for the HIPAA Audit Protocol](https://gitlab.com/ltrbojevic/hipaa-audit-protocol-issue-generator).\n\n## Distributed fuzzing and multi-system parallelization\n\nThe basic principles of multi-system parallelization apply whether you're running distributed fuzzing jobs manually or automating them on GitLab. While I haven't had a chance to port my personal workflows to GitLab yet, a quick glance tells me it’s likely possible.\n\nUsing `afl-gitlab.py`, you could run a separate script to handle the deployment, configuration, and de-provisioning of -S mode instances. 
My initial preference would be to run a second Python script – let's call it `afl-gitlab-s.py`  that would use `python-terraform` to provision and deprovision the infrastructure. Fabric can be used to configure the instances, start AFL, and so on.\n\nThere would have to be some thought put into the timing and orchestration between the two scripts. It's also important to note: your synchronization scripts have to be timed so as not to de-provision -S mode instances before a synchronization event occurs; especially if you opt for staged synchronization.\n\nLists make the most sense to me, so in other words:\n1. A Runner job runs `afl-gitlab.py`\n2. `afl-gitlab.py` starts a second script, `afl-gitlab-s.py`\n3. `afl-gitlab-s.py` does a short sleep to allow `afl-gitlab.py` to run the -M mode instance\n3. `afl-gitlab-s.py` uses `python-terraform` to provision -S mode instances\n4. `afl-gitlab-s.py` then uses Fabric to configure the -S mode instances (e.g., set up synchronization scripts) and start AFL\n5. `afl-gitlab-s.py` sleeps for 1:55 hrs\n6. `afl-gitlab-s.py` de-provisions the -S mode instances\n7. `afl-gitlab.py` ends the -M mode instance fuzzing\n\nNote that I'm assuming you've modified your synchronization scripts to transfer `crashes` and `hangs` to the -M mode instance. Please remember that the out-of-the-box synchronization scripts transfer `queue`, so unless you've modified your script to transfer other folder contents as well, you may lose findings when the -S mode instances are de-provisioned.\n\n## Automating advanced output triage and analysis\n\nMany triage and initial analysis workflows can be, and are, automated. My personal workflow includes a combination AFL's crash explorer, pwndbg, and radare2. I’ve condensed most of my initial triage to a single Python tool. The tool could be run as a Runner job, either on the Runner instance itself or, as above with the -S mode instances, offloaded to another box with the results returned to the Runner instance. 
Given that, and regardless of the language of your toolset, automating output triage and initial analysis seems to be doable with some tinkering.\n\nMuch like automatically creating an issue for every unique crash found, I’ve not yet had the time to focus on this and give it a go, but I’ll circle back and add it when I can.\n\n## Some final thoughts\n\nThere's a whole bunch we could automate and make more accessible in the fuzzing space. In the interest of reaching and benefiting the largest number of people, I’d love to see GitLab have mature fuzzing features that are helpful to developers and security folk alike. To help move that forward, I think community involvement in key. If you have the inclination please contribute to our [direction](https://gitlab.com/gitlab-org/gitlab-ee/issues/10852). And if, instead, you want to take this example and expand it outside of the GitLab ecosystem, please do and tell us how it went!\n\nPhoto by [Tom Fisk](https://www.pexels.com/@tomfisk?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels) on [Pexels](https://www.pexels.com)\n{: .note}\n",[9,720],{"slug":915,"featured":6,"template":680},"american-fuzzy-lop-on-gitlab","content:en-us:blog:american-fuzzy-lop-on-gitlab.yml","American Fuzzy Lop On Gitlab","en-us/blog/american-fuzzy-lop-on-gitlab.yml","en-us/blog/american-fuzzy-lop-on-gitlab",{"_path":921,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":922,"content":928,"config":934,"_id":936,"_type":14,"title":937,"_source":16,"_file":938,"_stem":939,"_extension":19},"/en-us/blog/an-ode-to-stable-counterparts",{"title":923,"description":924,"ogTitle":923,"ogDescription":924,"noIndex":6,"ogImage":925,"ogUrl":926,"ogSiteName":667,"ogType":668,"canonicalUrls":926,"schema":927},"An ode to stable counterparts","Our workflow model streamlines decision making, cultivates trust, and promotes cross-functional 
collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679002/Blog/Hero%20Images/stablecounterparts.jpg","https://about.gitlab.com/blog/an-ode-to-stable-counterparts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An ode to stable counterparts\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-10-16\",\n      }",{"title":923,"description":924,"authors":929,"heroImage":925,"date":931,"body":932,"category":787,"tags":933},[930],"Suri Patel","2018-10-16","\n_They said [this model](/handbook/leadership/#stable-counterparts) would help us thrive._\n_To foster trust, familiarity, and drive,_\u003Cbr/>\n_We would work side-by-side, knitting our workflows_\u003Cbr/>\n_And supporting one another in our highs and lows._\u003Cbr/>\n\n_Before we embarked on our journey, I fretted and fussed._\u003Cbr/>\n_With a furrowed brow, I felt a careful trust_\u003Cbr/>\n_In my leadership who often discussed_\u003Cbr/>\n_The need to readjust lest we combust._\u003Cbr/>\n\n_We shipped and scaled and detailed_\u003Cbr/>\n_Our results._\u003Cbr/>\n_Seamlessly soaring towards Two and Twenty,_\u003Cbr/>\n_Our managers said, “In their progress, that_\u003Cbr/>\n_team exults.”_\u003Cbr/>\n_We collaborate, update, and accelerate with flair._\u003Cbr/>\n\n_And now I must declare:_\u003Cbr/>\n_I have drawn the ace of hearts_\u003Cbr/>\n_With my team of stable counterparts!_\u003Cbr/>\n\nAt GitLab, we adopted a stable counterparts model to facilitate cross-functional\nconnections in the hope that working with the same people would increase the\nspeed of communication, build trust, and encourage iteration. 
In a stable\ncounterparts model, every team works with the\n[same team members](/handbook/engineering/development/dev/create/source-code-be/#stable-counterparts),\nincluding frontend engineers, UX designers, and test automation engineers, for\neach release, creating a smaller team within GitLab.\n\n## The benefits of stable counterparts\n\nThe ability to build long-term relationships is the foundational benefit of\nhaving stable counterparts. Repeated interactions helps us understand personal\nworkflows and communication styles, so we know how to most effectively work with\nour counterparts. Knowing how to best communicate with someone is a great benefit\nwhen working in high pressure situations or resolving conflict. Consistent\ncollaboration means faster results and more efficient processes.\n\nIn addition to building long-term relationships, we’ve noticed a few other\ninteresting benefits to having stable counterparts.\n\n- **Enabling a faster workflow**: There are some product areas that are easy to\nunderstand because every team member engages with them, but there are some that\nare challenging, such as [CI](https://docs.gitlab.com/ee/ci/),\n[security](https://docs.gitlab.com/ee/user/project/merge_requests/#security-reports),\nor [Kubernetes](https://docs.gitlab.com/ee/user/project/clusters/index.html),\nthat require domain knowledge that can be harder for a team member to quickly\nfathom without a certain amount of pre-knowledge. 
When a stable counterpart has\ndeveloped deeper understanding in complex areas, others know who to quickly\nconsult when confronted with a specific technical challenge, an insight that\ndrives velocity since team members are no longer blocked trying to determine who\ncan offer assistance.\n- **Promoting long-term brainstorming**: In traditional workflow models, product\nmanagers often have individual meetings with engineering managers, UX designers,\nand frontend managers in which brainstorming through ideas and talking about\nlong-term goals happens in silos. With stable counterparts, discussion benefits\nfrom cross-functional perspective, enhancing ideas, and igniting creativity,\nwhich can take place over several milestones.\n- **Increasing familiarity and comfort with problems**: Working with a rotating\nset of team members can result in a lack of comprehensive historical knowledge\non an issue, causing delays while team members digest information and become\nacquainted with the state of a solution. By working with the same people over\nseveral releases, we’re able to provide context early and implement learnings\nto solve problems in the right way.\n\n## Let’s talk about workflow impact\n\nWorking with stable counterparts has helped the team develop a faster and more\niterative workflow. We’re more focused in that we can pick up on discussions and\nitems that we tinkered with in previous releases. We now approach problems with\na deeper understanding, since we have long-term insight into why changes are\nimportant. Taking context from release to release and retaining that knowledge\nensures that we develop thoughtful solutions, especially since we feel a higher\nsense of ownership of projects because we’ve been involved throughout every stage.\n\nThis model has also resulted in better dependency management. 
We spend a lot of\ntime doing upfront investment into project planning and prioritization, so teams\nhave visibility into collaboration with backend and frontend. This makes it\neasier to see whether we need more backend or frontend resources in certain areas\nand to allocate engineers as needed.\n\n## Sounds great, but what are the drawbacks?\n\nThis model could lead to engineers feeling like they’re feature factories, so\nleadership must actively work to keep their team on an edge so that there’s a\nhealthy balance between product features and other tasks that are more complex\nor exciting.\n\nWhen working with stable counterparts, there’s a potential for conflict and\npersonality issues. If personal communication styles or workflows don’t align,\ninteractions can become tense and handoffs can be fraught with friction. When\npairing stable counterparts, leadership should consider personalities,\ncommunication styles, and workflows to ensure that a team, at baseline, can work\nwell together.\n\nWorking with the same people for too long means that we’re not exposed to a\nbroader audience and may not have fresh ideas come into conversations. It’s\npossible that teams become comfortable with the way things are and ideas are no\nlonger questioned. We haven’t encountered this problem at GitLab yet, since we’re\n[growing](/jobs/) so quickly that every team frequently has a change or new addition,\nwhich is accompanied by a variety of new questions and unique feedback. For\nteams that don’t have as much growth, it can be useful to invite other team\nmembers to provide perspective and question long-held beliefs.\n\n## Advice for other teams\n\nIf your team is interested in adopting a similar model, we suggest starting\nsmall and breaking teams into smaller components. 
For teams that are unaccustomed\nto an interdisciplinary model with agile teams, it can be a difficult adjustment,\nso it’s important that teams are structured around either a specific initiative\nor area of the product. To determine whether this is a model that could benefit\nyour organization, consider selecting a problem and pairing the same 4-5 team\nmembers, including a product manager, a UX designer, and a few engineers, for\nseveral releases until the problem is solved. Working together for several\nreleases helps team members nurture a strong, stable relationship, so it’s\nimportant that they’re given enough time to learn about and from each other.\n\nAlthough stable counterparts has worked well for GitLab’s workflow, it’s\nimportant to be sure that this is the model that fits _your_ company’s needs.\nDeveloping a workflow depends on strategy, targets, and the maturity level of an\norganization. These are all variables that need to be considered when building\nor changing a process. This setup wouldn’t have worked for GitLab 12 months ago,\nbut it works now, so continue to experiment and examine options as your team and\norganization develop. 
Whether you pursue a stable counterparts model or some other\nsetup, remember to select an approach that complements your organization and the\nproduct you’re building.\n\n_The writer is grateful to [Jeremy Watson](/company/team/#d3arWatson),\n[Liam McAndrew](/company/team/#lmcandrew), [John Jeremiah](/company/team/#j_jeremiah), and\n[Tim Zallman](/company/team/#tpmtim) for sharing their experiences as stable counterparts._\n",[723,9,811],{"slug":935,"featured":6,"template":680},"an-ode-to-stable-counterparts","content:en-us:blog:an-ode-to-stable-counterparts.yml","An Ode To Stable Counterparts","en-us/blog/an-ode-to-stable-counterparts.yml","en-us/blog/an-ode-to-stable-counterparts",{"_path":941,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":942,"content":948,"config":954,"_id":956,"_type":14,"title":957,"_source":16,"_file":958,"_stem":959,"_extension":19},"/en-us/blog/announcing-100m-series-d-funding",{"title":943,"description":944,"ogTitle":943,"ogDescription":944,"noIndex":6,"ogImage":945,"ogUrl":946,"ogSiteName":667,"ogType":668,"canonicalUrls":946,"schema":947},"Announcing $100 million in Series D round funding led by ICONIQ Capital","Today we announced $100M in new funding to beat nine best-in-class products with a single application.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663655/Blog/Hero%20Images/gitlab-live-sept-2018.png","https://about.gitlab.com/blog/announcing-100m-series-d-funding","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing $100 million in Series D round funding led by ICONIQ Capital\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-09-19\",\n      }",{"title":943,"description":944,"authors":949,"heroImage":945,"date":951,"body":952,"category":299,"tags":953},[950],"GitLab","2018-09-19","Today we are thrilled to announce our $100 million Series D funding led by 
ICONIQ Capital, bringing our valuation to over $1 billion and validating our position as the world’s first single application for the entire DevOps lifecycle. We’re elated that ICONIQ shares our vision of beating out nine other products with a single, best-in-class application that integrates each stage of the DevOps lifecycle. We plan to use the funding to become best-in-class in every DevOps software category from planning to monitoring. More than 100,000 organizations use GitLab to deploy to multiple clouds, implement [cloud native](/topics/cloud-native/) architectures, and practice Concurrent DevOps. This results in a 200 percent faster DevOps lifecycle with unmatched visibility, higher levels of efficiency, and comprehensive governance.\n\n> “GitLab is emerging as a leader across the entire software development ecosystem by releasing software at a pace that is unmatched by any competitor. They’re taking the broad software development market head-on by developing an application that allows organizations to churn out software at an accelerated rate with cost and time savings.” - Matthew Jacobson, General Partner at ICONIQ Capital\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZgFqyXCsqPY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vTO_mVE0psqDSIOwmrv30ebL0IMdAIhYFHqBcoqI6b8_Cl1yl8f6FaAIm-d7qwsOWhhiUIqPxo6fjhH/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nEnterprises are facing a tool chain crisis, investing time and resources into piecing together disparate tools from different stages of the software development and operations lifecycle. 
A typical enterprise needs to integrate tools like VersionOne, Jira, GitHub, Jenkins, Artifactory, Electric Cloud, Puppet, New Relic, and BlackDuck. This causes poor visibility because data lives in so many different tools, slow cycle time because teams need to wait on each other, and bolted on security as an afterthought.\n\nOur open core application, which has more than 2,000 contributors, is breaking down this barrier by building features for each stage of the DevOps lifecycle into a single application. This enables Concurrent DevOps, the ability for teams to manage, plan, create, verify, package, release, configure, monitor, and secure software together so they can have visibility into what matters, start without waiting, and ship with confidence.\n\n> “Our goal is to strive for less people managing processes and more automation within our workflow. GitLab does just that by eliminating the complicated web that tied all of our development tools together, so we now have a single, automated application that makes our team more efficient.” — Adam Dehnel, product architect, BI Worldwide\n\nWe started as source code management based on Git. But when announcing our series C last year, we [voiced the ambition](/blog/gitlab-raises-20-million-to-complete-devops/) to grow beyond that to cover every product category of the DevOps lifecycle. Some features have already become best in class, like the continuous integration system that scored highest in the current offering category in The Forrester Wave™: Continuous Integration Tools, Q3 2017 report, and was recognized as a \"Strong Performer\" in The Forrester New Wave™ for Value Stream Management Tools, Q3 2018. 
With this latest funding round and the growing number of people contributing code to GitLab, we are well positioned for all of our features to become best-in-class.\n\n> “Two of the defining characteristics of modern application development are an open, collaborative process and toolchains that transcend individual product categories from planning out to operations. By marrying an open source development process to a comprehensive set of functional capabilities, GitLab aims to leverage those precise qualities in a single application that meets enterprises’ DevOps needs.” - Stephen O’Grady, Principal Analyst with RedMonk\n\n> “Since raising a Series C round last year, we’ve delivered on our commitment to bring a single application for the entire DevOps lifecycle to market, and as a result have been able to reach over $1 billion in valuation. With this latest funding round, we will continue to improve upon our suite by building out our management, planning, packaging, monitoring and security features for a more robust DevOps application.” – Sid Sijbrandij, CEO of GitLab\n\nGitLab is purpose-built for organizations that are undergoing a digital transformation. Our focus is on supporting organizations that are aiming for faster DevOps lifecycles, cloud native architectures, and multi-cloud deployments. Some of our recent product milestones include the release of [Auto DevOps](/press/releases/2018-06-22-auto-devops-gitlab-11.html) to accelerate the DevOps lifecycle by 200 percent, a [Kubernetes integration](/releases/2018/03/22/gitlab-10-6-released/) so clusters can be spun up from within GitLab, and [enhancements to the Web IDE](/releases/2018/08/22/gitlab-11-2-released/) to make code changes easier for everyone.\n\nThe latest round brings our valuation to over $1 billion and validates our position as the world’s first single application for the entire DevOps lifecycle. 
As other DevOps tools become locked in to a single cloud, our customers are embracing a multi-cloud future, so their tooling must be independent of any single cloud provider and work across on-premises, private and public clouds. Customers want their tooling to be consistent, whether they deploy it themselves or use it as a service.\n\n> “Deployments are no longer limited to a traditional software release cycle. Using GitLab-CI for our Continuous Deployments over-the-air, we have empowered the customer to be their own technician while increasing the rate at which software can be delivered.” - Chris Hill, head of systems engineering, next generation infotainment at Jaguar Land Rover.\n\nWe began in 2014 with a mission to change all creative work from read-only to read-write, so that everyone can contribute. Since then, our all-remote company has grown from fewer than 10 to more than 350 team members in over 40 countries across the globe. And we’re not slowing down – we’re still [hiring for 77 positions](/jobs/)!\n\nWe were recently [recognized by Inc. Magazine as No. 44 out of the 5,000 fastest-growing private companies in the United States](/blog/gitlab-ranked-44-on-inc-5000-list/). We attribute our success to our open core model and our value of transparency. We have an emphasis on co-creation and a commitment to open source – [over 2,000 users and customers have contributed to GitLab’s code base](http://contributors.gitlab.com/). This philosophy helps build stronger customer relationships, which in turn result in a direct influence on feature updates to the product.\n\n## Get involved\n\nWe owe GitLab’s existence to your enthusiasm, drive, and hard work. Without our contributors’ belief in open source software, we would not be where we are today. We need your help to make our collective vision a reality. 
Everyone can contribute!\n\nWe are committed to standing by our [promise to be good stewards of open source](/blog/being-a-good-open-source-steward/),\nand keeping communication and collaboration amongst the community a high priority.\n\nTo get started you can learn more about [Concurrent DevOps](/topics/devops/), read stories of [how others are using GitLab](/customers/), or visit our [contribution guide](/community/contribute/).",[675,9,873],{"slug":955,"featured":6,"template":680},"announcing-100m-series-d-funding","content:en-us:blog:announcing-100m-series-d-funding.yml","Announcing 100m Series D Funding","en-us/blog/announcing-100m-series-d-funding.yml","en-us/blog/announcing-100m-series-d-funding",{"_path":961,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":962,"content":968,"config":973,"_id":975,"_type":14,"title":976,"_source":16,"_file":977,"_stem":978,"_extension":19},"/en-us/blog/anomaly-detection-using-prometheus",{"title":963,"description":964,"ogTitle":963,"ogDescription":964,"noIndex":6,"ogImage":965,"ogUrl":966,"ogSiteName":667,"ogType":668,"canonicalUrls":966,"schema":967},"How to use Prometheus for anomaly detection in GitLab","Explore how Prometheus query language can be used to help you diagnose incidents, detect performance regressions, tackle abuse, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667819/Blog/Hero%20Images/anomaly-detection-cover.png","https://about.gitlab.com/blog/anomaly-detection-using-prometheus","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Prometheus for anomaly detection in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-23\",\n      }",{"title":963,"description":964,"authors":969,"heroImage":965,"date":970,"body":971,"category":743,"tags":972},[672],"2019-07-23","\n\nOne of the more basic functions of the Prometheus 
query language is real-time aggregation of [time series data](https://prometheus.io/docs/prometheus/latest/querying/basics/). [Andrew Newdigate](/company/team/#suprememoocow), a distinguished engineer on the GitLab infrastructure team, hypothesized that Prometheus query language can also be used to detect anomalies in time series data.\n\n[Andrew broke down the different ways Prometheus can be used](https://vimeo.com/341141334) for the attendees of [Monitorama 2019](https://monitorama.com/index.html). This blog post explains how anomaly detection works with Prometheus and includes the code snippets you’ll need to try it out for yourself on your own system.\n\n## Why is anomaly detection useful?\n\nThere are four key reasons why anomaly detection is important to GitLab:\n\n1. **Diagnosing incidents**: We can figure out which services are performing outside their normal bounds quickly and reduce the average time it takes to [detect an incident (MTTD)](/handbook/engineering/infrastructure/incident-management/), bringing about a faster resolution.\n2. **Detecting application performance regressions**: For example, if an n + 1 regression is introduced and leads to one service calling another at a very high rate, we can quickly track the issue down and resolve it.\n3. **Identify and resolve abuse**: GitLab offers free computing ([GitLab CI/CD](/topics/ci-cd/)) and hosting (GitLab Pages), and there are a small subset of users who might take advantage.\n4. **Security**: Anomaly detection is essential to spotting unusual trends in GitLab time series data.\n\nFor these reasons and many others, Andrew investigated whether it was possible to perform anomaly detection on GitLab time series data by simply using Prometheus queries and rules.\n\n## What level of aggregation is the correct one?\n\nFirst, time series data must be aggregated correctly. 
Andrew used a standard counter of `http_requests_total` as the data source for this demonstration, although many other metrics can be applied using the same techniques.\n\n```\nhttp_requests_total{\n job=\"apiserver\",\n method=\"GET\",\n controller=\"ProjectsController\",\n status_code=\"200\",\n environment=\"prod\"\n}\n```\n{: .language-ruby}\n\nThis example metric has **some extra dimensions**: `method`, `controller`, `status_code`, `environment`, as well as the dimensions that Prometheus adds, such as `instance` and `job`.\n\nNext, you must choose the correct level of aggregation for the data you are using. This is a bit of a Goldilocks problem – too much, too little, or just right – but it is essential for finding anomalies. By **aggregating the data too much**, it can be reduced to too few dimensions, creating two potential problems:\n\n1. You can miss genuine anomalies because the aggregation hides problems that are occurring within subsets of your data.\n2. If you do detect an anomaly, it's difficult to attribute it to a particular part of your system without more investigation into the anomaly.\n\nBut by **aggregating the data too little**, you might end up with a series of data with very small sample sizes which can lead to false positives and could mean flagging genuine data as outliers.\n\nJust right: Our experience has shown the **right level of aggregation is the service level**, so we include the job label and the environment label, but drop other dimensions. 
The data aggregation used through the rest of the talk includes: job `http requests`, rate five minutes, which is basically a rate across job and environment on a five-minute window.\n\n```\n- record: job:http_requests:rate5m\nexpr: sum without(instance, method, controller, status_code)\n(rate(http_requests_total[5m]))\n# --> job:http_requests:rate5m{job=\"apiserver\", environment=\"prod\"}  21321\n# --> job:http_requests:rate5m{job=\"gitserver\", environment=\"prod\"}  2212\n# --> job:http_requests:rate5m{job=\"webserver\", environment=\"prod\"}  53091\n```\n{: .language-ruby}\n\n## Using z-score for anomaly detection\n\nSome of the primary principles of statistics can be applied to detecting anomalies with Prometheus.\n\nIf we know the average value and [standard deviation (σ)](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/standard-deviation/) of a Prometheus series, we can use any sample in the series to calculate the z-score. The z-score is measured in the number of standard deviations from the mean. So a z-score of 0 would mean the z-score is identical to the mean in a data set with a normal distribution, while a z-score of 1 is 1.0 σ from the mean, etc.\n\nAssuming the underlying data has a normal distribution, 99.7% of the samples should have a z-score between zero to three. The further the z-score is from zero, the less likely it is to exist. We apply this property to detecting anomalies in the Prometheus series.\n\n1. Calculate the average and standard deviation for the metric using data with a large sample size. For this example, we use one week’s worth of data. 
If we assume we're evaluating the recording rule once a minute, over a one-week period we'll have just over 10,000 samples.\n\n```\n# Long-term average value for the series\n- record: job:http_requests:rate5m:avg_over_time_1w\nexpr: avg_over_time(job:http_requests:rate5m[1w])\n\n# Long-term standard deviation for the series\n- record: job:http_requests:rate5m:stddev_over_time_1w\nexpr: stddev_over_time(job:http_requests:rate5m[1w])\n```\n{: .language-ruby}\n\n2.  We can calculate the z-score for the Prometheus query once we have the average and standard deviation for the aggregation.\n\n```\n# Z-Score for aggregation\n(\njob:http_requests:rate5m -\njob:http_requests:rate5m:avg_over_time_1w\n) /  job:http_requests:rate5m:stddev_over_time_1w\n```\n{: .language-ruby}\n\nBased on the statistical principles of normal distributions, **we can assume that any value that falls outside of the range of roughly +3 to -3 is an anomaly**. We can build an alert around this principle. For example, we can get an alert when our aggregation is out of this range for more than five minutes.\n\n![Graph showing RPS on GitLab.com over 48 hours](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image1.png){: .shadow.medium.center}\n\nGitLab.com Pages service RPS over 48 hours, with ±3 z-score region in green\n{: .note.text-center}\n\nZ-scores are a bit awkward to interpret on a graph because they don’t have a unit of measurement. But anomalies on this chart are easy to detect. Anything that appears outside of the green area (which denotes z-scores that fall within a range of +3 or -3) is an anomaly.\n\n### What if you don’t have a normal distribution?\n\n**But wait**: We make a big leap by assuming that our underlying aggregation has a normal distribution. 
If we calculate the z-score with data that isn’t normally distributed, our results will be incorrect.\n\nThere are numerous statistical techniques for testing your data for a normal distribution, but the best option is to test that your underlying data has a z-score of about **+4 to -4**.\n\n```\n(\n max_over_time(job:http_requests:rate5m[1w]) - avg_over_time(job:http_requests:rate5m[1w])\n) / stddev_over_time(job:http_requests:rate5m[1w])\n# --> {job=\"apiserver\", environment=\"prod\"}  4.01\n# --> {job=\"gitserver\", environment=\"prod\"}  3.96\n# --> {job=\"webserver\", environment=\"prod\"}  2.96\n\n(\n min_over_time(job:http_requests:rate5m[1w]) - avg_over_time(job:http_requests:rate5m[1w])\n) / stddev_over_time(job:http_requests:rate5m[1w])\n# --> {job=\"apiserver\", environment=\"prod\"}  -3.8\n# --> {job=\"gitserver\", environment=\"prod\"}  -4.1\n# --> {job=\"webserver\", environment=\"prod\"}  -3.2\n```\n{: .language-ruby}\n\nTwo Prometheus queries testing the minimum and maximum z-scores.\n{: .note.text-center}\n\nIf your results return with a range of +20 to -20, the tail is too long and your results will be skewed. Remember too that this needs to be run on an aggregated, not unaggregated series. Metrics that probably don’t have normal distributions include things like error rates, latencies, queue lengths etc., but many of these metrics will tend to work better with fixed thresholds for alerting anyway.\n\n## Detecting anomalies using seasonality\n\nWhile calculating z-scores works well with normal distributions of time series data, there is a second method that can yield _even more accurate_ anomaly detection results. 
**Seasonality** is a characteristic of a time series metric in which the metric experiences regular and predictable changes that recur every cycle.\n\n![Graph showing Gitaly RPS, Mon-Sun over four weeks](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image2.png){: .shadow.medium.center}\n\nGitaly requests per second (RPS), Monday-Sunday, over four consecutive weeks\n{: .note.text-center}\n\nThis graph illustrates the RPS (requests per second) rates for Gitaly over seven days, Monday through Sunday, over four consecutive weeks. The seven-day range is referred to as the “offset,” meaning the pattern that will be measured.\n\nEach week on the graph is in a different color. The seasonality in the data is indicated by the consistency in trends indicated on the graph – every Monday morning, we see the same rise in RPS rates, and on Friday evenings, we see the RPS rates drop off, week after week.\n\nBy leveraging the seasonality in our time series data we can create more accurate predictions which will lead to better anomaly detection.\n\n### How do we leverage seasonality?\n\nCalculating seasonality with Prometheus required that we iterate on a few different statistical principles.\n\nIn the first iteration, we calculate by adding the growth trend we’ve seen over a one-week period to the value from the previous week. 
Calculate the growth trend by subtracting the rolling one-week average for last week from the rolling one-week average for now.\n\n```\n- record: job:http_requests:rate5m_prediction\n  expr: >\n    job:http_requests:rate5m offset 1w                     # Value from last period\n    + job:http_requests:rate5m:avg_over_time_1w            # One-week growth trend\n    - job:http_requests:rate5m:avg_over_time_1w offset 1w\n```\n\nThe first iteration is a bit narrow; we’re using a five-minute window from this week and the previous week to derive our predictions.\n\nIn the second iteration, we expand our scope by taking the average of a four-hour period for the previous week and comparing it to the current week. So, if we’re trying to predict the value of a metric at 8am on a Monday morning, instead of using the same five-minute window from one week prior, we use the average value for the metric from 6am until 10am for the previous morning.\n\n```\n- record: job:http_requests:rate5m_prediction\n  expr: >\n    avg_over_time(job:http_requests:rate5m[4h] offset 166h) # Rounded value from last period\n    + job:http_requests:rate5m:avg_over_time_1w             # Add 1w growth trend\n    - job:http_requests:rate5m:avg_over_time_1w offset 1w\n```\n{: .language-yaml}\n\nWe use the 166 hours in the query instead of one week because we want to use a four-hour period based on the current time of day, so we need the offset to be two hours short of a full week.\n\n![Comparing the real Gitaly RPS with our prediction](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image3.png){: .shadow.medium.center}\n\nGitaly service RPS (yellow) vs prediction (blue), over two weeks.\n{: .note.text-center}\n\nA comparison of the actual Gitaly RPS (yellow) with our prediction (blue) indicate that our calculations were fairly accurate. 
However, this method has a flaw.\n\nGitLab usage was lower than the typical Wednesday because May 1 was International Labor Day, a holiday celebrated in many different countries. Because our growth rate is informed by the previous week’s usage, our predictions for the next week, on Wednesday, May 8, were for a lower RPS than it would have been had it not been a holiday on Wednesday, May 1.\n\nThis can be fixed by making three predictions for three consecutive weeks before Wednesday, May 1; for the previous Wednesday, the Wednesday before that, and the Wednesday before that. The query stays the same, but the offset is adjusted.\n\n```\n- record: job:http_requests:rate5m_prediction\n  expr: >\n   quantile(0.5,\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 166h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 1w\n       , \"offset\", \"1w\", \"\", \"\")\n     or\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 334h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 2w\n       , \"offset\", \"2w\", \"\", \"\")\n     or\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 502h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 3w\n       , \"offset\", \"3w\", \"\", \"\")\n   )\n   without (offset)\n```\n{: .language-yaml}\n\n![A graph showing three predictions for three Wednesdays vs. actual Gitaly RPS](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image4.png){: .shadow.medium.center}\n\nThree predictions for three Wednesdays vs actual Gitaly RPS, Wednesday, May 8 (one week following International Labor Day)\n{: .note.text-center}\n\nOn the graph we’ve plotted Wednesday, May 8 and three predictions for the three consecutive weeks before May 8. 
We can see that two of the predictions are good, but the May 1 prediction is still far off base.\n\nAlso, we don’t want three predictions, we want **one prediction**. Taking the average is not an option, because it will be diluted by our skewed May 1 RPS data. Instead, we want to calculate the median. Prometheus does not have a median query, but we can use a quantile aggregation in lieu of the median.\n\nThe one problem with this approach is that we're trying to include three series in an aggregation, and those three series are actually all the same series over three weeks. In other words, they all have the same labels, so connecting them is tricky. To avoid confusion, we create a label called `offset` and use the label-replace function to add an offset to each of the three weeks. Next, in the quantile aggregation, we strip that off, and that gives us the middle value out of the three.\n\n```\n- record: job:http_requests:rate5m_prediction\n  expr: >\n   quantile(0.5,\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 166h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 1w\n       , \"offset\", \"1w\", \"\", \"\")\n     or\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 334h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 2w\n       , \"offset\", \"2w\", \"\", \"\")\n     or\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 502h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 3w\n       , \"offset\", \"3w\", \"\", \"\")\n   )\n   without (offset)\n```\n{: .language-yaml}\n\nNow, our prediction deriving the median value from the series of three aggregations is much more accurate.\n\n![Graph showing median predictions vs. 
actual Gitaly RPS on Weds May 8](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image5.png){: .shadow.medium.center}\n\nMedian predictions vs actual Gitaly RPS, Wednesday, May 8 (one week following International Labor Day)\n{: .note.text-center}\n\n### How do we know our prediction is truly accurate?\n\nTo test the accuracy of our prediction, we can return to the z-score. We can use the z-score to measure the sample's distance from its prediction in standard deviations. The more standard deviations away from our prediction we are, the greater the likelihood is that a particular value is an outlier.\n\n![Predicted normal range +1.5σ/-1.5σ](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image6.png){: .shadow.medium.center}\n\nPredicted normal range ± 1.5σ for Gitaly Service\n{: .note.text-center}\n\nWe can update our Grafana chart to use the seasonal prediction rather than the weekly rolling average value. The range of normality for a certain time of day is shaded in green. Anything that falls outside of the shaded green area is considered an outlier. 
In this case, the outlier was on Sunday afternoon when our cloud provider encountered some network issues.\n\nUsing boundaries of ±2σ on either side of our prediction is a pretty good measurement for determining an outlier with seasonal predictions.\n\n## How to set up alerting using Prometheus\n\nIf you want to set up alerts for anomaly events, you can apply a pretty straightforward rule to Prometheus that checks if the z-score of the metric is between a standard deviation of **+2 or -2**.\n\n```\n- alert: RequestRateOutsideNormalRange\n  expr: >\n   abs(\n     (\n       job:http_requests:rate5m - job:http_requests:rate5m_prediction\n     ) / job:http_requests:rate5m:stddev_over_time_1w\n   ) > 2\n  for: 10m\n  labels:\n    severity: warning\n  annotations:\n    summary: Requests for job {{ $labels.job }} are outside of expected operating parameters\n```\n{: .language-yaml}\n\nAt GitLab, we use a custom routing rule that pings Slack when any anomalies are detected, but doesn’t page our on-call support staff.\n\n## The takeaway\n\n1. Prometheus can be used for some types of anomaly detection\n2. The right level of data aggregation is the key to anomaly detection\n3. Z-scoring is an effective method, if your data has a normal distribution\n4. Seasonal metrics can provide great results for anomaly detection\n\nWatch Andrew’s full presentation from [Monitorama 2019](https://monitorama.com/index.html). If you have questions for Andrew, reach him on Slack at #talk-andrew-newdigate. 
You can also read more about [why you need Prometheus](/blog/why-all-organizations-need-prometheus/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/341141334?portrait=0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n",[9],{"slug":974,"featured":6,"template":680},"anomaly-detection-using-prometheus","content:en-us:blog:anomaly-detection-using-prometheus.yml","Anomaly Detection Using Prometheus","en-us/blog/anomaly-detection-using-prometheus.yml","en-us/blog/anomaly-detection-using-prometheus",{"_path":980,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":981,"content":987,"config":994,"_id":996,"_type":14,"title":997,"_source":16,"_file":998,"_stem":999,"_extension":19},"/en-us/blog/applying-risk-management-to-remote-learning",{"title":982,"description":983,"ogTitle":982,"ogDescription":983,"noIndex":6,"ogImage":984,"ogUrl":985,"ogSiteName":667,"ogType":668,"canonicalUrls":985,"schema":986},"Applying risk management to pandemic-driven remote learning","A GitLab team member and parent offers some tips to improve today’s remote learning experience.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672774/Blog/Hero%20Images/pexels-august.jpg","https://about.gitlab.com/blog/applying-risk-management-to-remote-learning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Applying risk management to pandemic-driven remote learning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Meghan Maneval\"}],\n        \"datePublished\": \"2020-08-27\",\n      }",{"title":982,"description":983,"authors":988,"heroImage":984,"date":990,"body":991,"category":698,"tags":992},[989],"Meghan Maneval","2020-08-27","Like many of you, when COVID-19 began to spread in the Spring of 2020, I never imagined just how much my life would change. 
While I personally was accustomed to working remotely, my husband and children certainly were not. As the pandemic continues, parents around the world are faced with a new challenge: how to simultaneously manage their careers and their children’s educational needs. The risks, at times, can feel insurmountable. I went through every emotion this summer as I tried to strategize for what pandemic-driven remote learning would look like for my family. And then I realized, why am I trying to recreate the wheel? As an all-remote company, [GitLab’s values](https://handbook.gitlab.com/handbook/values/) and [all-remote culture](/company/culture/all-remote/) provide a proven model for successfully managing a remote workforce. So why not try it out with my kids? \n\nSo with that knowledge and appreciation, I decided to utilize the basic principles of [risk management](/handbook/security/security-assurance/security-risk/storm-program/index.html) to manage my family’s work and learn from home routine. But don’t worry, you don’t have to be a compliance professional to utilize this technique. In this blog post, I've mapped out the steps I used with my family that I hope will contribute to a more successful 2020/2021 school year for families. \n\nBefore you start, it is critical to remember, you can never fully eliminate risk. The steps below are designed to reduce the risk to align with your risk appetite. Only you can determine what level of risk you will accept. Some people, like myself, may be more risk-averse and therefore seek to plan out everything to the smallest detail. Others might be more risk-tolerant and willing to let things “slide” a little. No matter where you fall on that spectrum, you can utilize the steps below to document and execute a successful pandemic-driven remote learning plan. \n\n### 1: Identify\n\nThe first stage of risk management is to identify possible risks. If you don’t know what could go wrong you can’t prepare for it! 
It’s important to [collaborate](https://handbook.gitlab.com/handbook/values/#collaboration) with each member of your family and understand their specific needs and concerns. As parents, we all know that each of our children has different needs. The same is true for their education: what works for one student won’t work for all students.\n\nLet’s consider last spring as our “trial run”. For remote learning, discuss with your children what they enjoyed about that time and what didn’t work. If possible, reach out to their previous year’s teachers for additional feedback. To ensure your remote work success during present times, it is also important to have a discussion with your boss and/or Human Resources department to set and understand expectations. Many employers have programs, like GitLab’s [Family and Friends Day](https://about.gitlab.com/company/family-and-friends-day/) to provide flexible schedules or supportive programs like what’s described in this [GitLab COVID-19 handbook page](/handbook/total-rewards/benefits/covid-19/#sts=Resources%20for%20COVID-19). The more people you talk to, the more data you can collect. And the more data you have now, the more prepared you will be for the next steps. \n\n### 2: Analyze\nOnce you have identified your risks, you can move on to analyzing them. Depending on how many people are in your family, the list of risks identified may be long. In my case, as a family of 7, we had around 15 items on our initial list when we undertook this exercise. As we began analyzing them, however, our list grew to almost 30. \n\nFor us, the easiest way to analyze these risks was to consider the impact these risks had on the family (or the individual) and the likelihood of them recurring. Then we asked why over and over until the true cause is identified. \n\nExample:   \n**Student A (17):** The school provided the students with weekly packets where they read and complete worksheets. 
Student A was unable to complete many of the assignments and failed 2 classes.  \n**Risk Identified:** Student A is concerned the school will do a similar process (paper packets) and he will continue to fail.  \n**Impact:** If Student A fails another class, he won’t graduate on time.  \n**Likelihood:** Depending on the classes and the course work, this could be highly likely.     \n\nRoot cause analysis: Why did Student A fail?\n* Student A did not complete the packets for 2 of his classes, why?\n* Student A had trouble understanding the content, why?\n* Student A learns better with verbal instructions and opportunities to ask questions. \n\nIn this case, the root cause was that Student A needs more verbal instruction and oversight when being presented with new concepts. \n\nYou may also identify opportunities as part of this process. For example, in our house, Student C preferred using Google Classroom’s To-Do List functionality to track open assignments and was able to easily visualize his tasks. By identifying what went right, in addition to what went wrong, you are able to better shape your treatment plans in the next phase. \n\n### 3: Action\nOnce you have analyzed your risks and identified the root causes, you can move on to the action phase. This phase is often the most difficult to complete. If you knew how to do it the right way, you would have done it correctly in the first place, right? Actually, wrong. We learn a lot from failing! Some of the best plans go through multiple [iterations](https://handbook.gitlab.com/handbook/values/#iteration) before you find the right fit. The important thing is to focus on improvement. \n\nBelow is a snapshot of the action plans I developed with my family: \n\n| **Risk** | **Root cause** | **Treatment plan** |\n|:-------------|:-------------|:-------------|\n| Student A is concerned the school will employ a similar process (paper packets) and he will continue to fail. 
| Student A learns better with verbal instructions and opportunities to ask questions. | _Iteration 1_: Parent assists Student A in creating a schedule where Parent can review the instruction page with Student A and answer any questions up front. Student A then works on packets for 1 hour. If packet is not completed and/or student has questions, Student A asks Parent for assistance during Parent’s lunch break. \u003Cbr/> _Iteration 2_: If school changes format to online learning using Zoom, Student A will work with teacher on expectations and additional assistance. |\n| Parent is concerned about Student B’s social and emotional well-being. | Student B learns better when she can work in a group with her peers to solve problems. Student B is used to having a classroom of friends to support her. | _Iteration 1_: Parent sets up an iPad for Student B to contact her friends. \u003Cbr/> _Iteration 2_: Teacher sets up breakout rooms in Zoom for collaboration. |\n| Parent is concerned about internet bandwidth. | Up to 7 people are using the wireless to learn and work from home. | _Iteration 1_: Parent increases internet speeds and bandwidth. Parent moves router to offer wired connection to Parent’s laptop. \u003Cbr/> _Iteration 2_: Parent sets up router to support two bands- 2.4ghz and 5ghz. ** \u003Cbr/> _Iteration 3_: Parent replaces older devices that might be bandwidth hogs. \u003Cbr/> _Iteration 4_: Parent coordinates a “no meeting” block during peak school hours with employer. |\n\n** The 2.4ghz network is slower but can reach further. However, 2.4 is very prone to interference (such as microwaves). The 5ghz network is faster, but the signal is weaker. \n\nThe final step in the action phase is to discuss the plan(s) with all parties involved. Being [transparent](https://handbook.gitlab.com/handbook/values/#transparency) with teachers and your employer will be key to your success. In our case, we spoke to each of our children’s teachers and expressed our concerns. 
In many cases, your child’s teachers can add a lot of value to the action plans. The same is true for your employer. When you surface issues constructively, it allows you to be proactive in your response plan. \n\n### 4: Monitor\nNow that you have your plans in place, you need to find a way to determine if they are working. In order to track your results, you need a measure of success. Remember when I said that each person’s risk appetite is different? The same is true with measures of success. In our case, we decided to measure our children’s success based on two factors: attendance in virtual classes and completion of assignments. For our high school and college-age children, we set a 90% attendance goal with a B average across all classes. For our elementary-age children, we set an 85% attendance goal; however, 95% of assignments must be turned in. Each child also set a “stretch” goal to address something particularly challenging from the Spring. For example, Student B struggles with reading and her progress was stunted due to lack of reading support during the spring semester. She set a personal goal to get back to the level she was at by the end of the first term.\n\nAs you can see, the principles of risk management can be pretty handy in the real world. As you work through these steps with your family, Keeping GitLab’s values [CREDIT](https://handbook.gitlab.com/handbook/values/#credit) in mind can help guide the way. \n\n* **C**ollaborate: No one can solve this alone. \n* **R**esults: Focus on action and growth, not perfection.\n* **E**fficiency: Allow your kids self-learning opportunities, but step in when needed. \n* **D**iversity, Inclusion and Belonging: Build a safe community where everyone has input. This includes your family, their teachers, and your employer(s). \n* **I**teration: We all will fail. At some point, something will go wrong. But that’s ok! Learn from it and reassess the plan. It’s ok to change the plan if it isn’t working. 
\n* **T**ransparency: Openly discuss how your family is feeling about remote education and work. But remember, as the parent or caregiver, your tone will set the tone for the rest of the family. So be sure to be constructive and positive in your conversations. And, as cheesy as it sounds, print it out and post it! We have schedules, reminders, and signs posted all around our house to transparently communicate the expectations and ensure we are all working together to meet our collective goals. \n\nDoes this plan resonate with you?  Have a suggestion I missed including? Please leave a comment, I’d love to iterate on my family’s approach!\n\nCover image by [August de Richelieu](https://www.pexels.com/@august-de-richelieu) on [Pexels](https://pexels.com/)",[720,9,993],"demo",{"slug":995,"featured":6,"template":680},"applying-risk-management-to-remote-learning","content:en-us:blog:applying-risk-management-to-remote-learning.yml","Applying Risk Management To Remote Learning","en-us/blog/applying-risk-management-to-remote-learning.yml","en-us/blog/applying-risk-management-to-remote-learning",{"_path":1001,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1002,"content":1008,"config":1014,"_id":1016,"_type":14,"title":1017,"_source":16,"_file":1018,"_stem":1019,"_extension":19},"/en-us/blog/ask-gitlab-security-alexander-dietrich",{"title":1003,"description":1004,"ogTitle":1003,"ogDescription":1004,"noIndex":6,"ogImage":1005,"ogUrl":1006,"ogSiteName":667,"ogType":668,"canonicalUrls":1006,"schema":1007},"Ask GitLab Security: Alexander Dietrich","What are the challenges and rewards of working security for a growing, cloud native company? 
We grill one of our senior security engineers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679726/Blog/Hero%20Images/ask-security-cover.png","https://about.gitlab.com/blog/ask-gitlab-security-alexander-dietrich","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ask GitLab Security: Alexander Dietrich\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2019-06-26\",\n      }",{"title":1003,"description":1004,"authors":1009,"heroImage":1005,"date":1011,"body":1012,"category":720,"tags":1013},[1010],"Heather Simpson","2019-06-26","\nWhat’s it like working to secure one of the most transparent organizations in the world? To be a security practitioner in a highly iterative and agile environment? What does that look like and what kind of people thrive in that environment? It takes a certain individual... curious, analytical, collaborative and dedicated. Of course, there’s more than meets the eye when it comes to our GitLab Security team; they also tackle the hard topics like the age-old 'Is a hotdog a sandwich?' debate, Vim vs Emacs, and Linux distros.\n{: .note}\n\nWe take securing the GitLab product and service and protecting our company very seriously. But, we try not to take ourselves too seriously. 
We hope you learn something new in [this series](/blog/tags.html#ask-GitLab-security), but that you enjoy yourself too.\n{: .note}\n\n\n![Alexander Dietrich Headshot](https://about.gitlab.com/images/blogimages/dietrich.png){: .small.right.wrap-text} **Name:** Alexander Dietrich\n\n**Title:** Senior security engineer, [Automation](/handbook/security/#security-automation)\n\n**How long have you been at GitLab?** I started in September 2018\n\n**GitLab handle:** [@adietrich](https://gitlab.com/adietrich)\n{: #tanuki-orange}\n\n**Connect with Alexander:** [LinkedIn](https://www.linkedin.com/in/alexanderdietrich/)\n\n\n#### Tell us what you do here at GitLab:\nI create tools for the security department to automate tasks that were previously done mostly manually (or not at all), so we can perform our work more quickly, consistently, and (I hope) delightfully. Security teams are rarely large teams, and security automation focuses on scaling the team.\n\n#### What’s the most challenging or rewarding aspect of your role?\nNothing I have worked on so far has been cookie-cutter; there’s a continuous flow of new technologies to learn and use cases to cover, which I find challenging and rewarding at the same time. GitLab is a cloud native company, so having the full range of services at our disposal to solve a problem can be tempting (and potentially overwhelming), at which point it’s good to remember our [value of efficiency](https://handbook.gitlab.com/handbook/values/#efficiency) and go for the “[boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions).” Your team members will be much happier too, when your PagerDuty, Slack, and GitLab integration is only a few lines of Python running [serverless](/topics/serverless/) and just works.\n\n#### And, what are the top 2-3 initiatives you’re currently focused on?\n- Making sure we meet our remediation goals for security issues, i.e. through automated escalation (if necessary). 
Some examples:\n     * [Build escalation engine for issues escalation](https://gitlab.com/gitlab-com/gl-security/security-department-meta/issues/383)\n     * [Workflow for escalation engine](https://gitlab.com/gitlab-com/gl-security/security-department-meta/issues/446)\n- Reducing friction for our application security engineers: An example would be the automated import of HackerOne reports directly into GitLab issues or improving our engagement with HackerOne reporters through automated updates and responses.\n- Laying the groundwork for GitLab’s [Zero Trust initiative](/handbook/security/#zero-trust); currently, I’m focusing on building onto our [SSO solution](/handbook/business-technology/okta/).\n\n#### How did you get into security?\nI have been following IT security topics for many years from a defender perspective, due to running things on the internet and an interest in privacy-enhancing technologies. Professionally, I switched to security from a regular software development position, when my previous employer needed a dedicated security team for their development organisation. Suddenly I was responsible not only for secure software development practices, but also awareness of potential threats to our services stack and operational security of our cloud environments. It was very exciting, and I learned a lot, especially about the value of automation.\n\n#### In the past decade, how has your area of expertise changed?\nSignificantly, I started out writing software that was sold in boxes in stores (remember those?) and saw the entire business shift to “cloud native,” with me changing focus from writing software to making sure that software is written and operated securely. Being able to apply my general security-mindedness at work was a great opportunity, and it’s kind of funny to see the trend for security to “shift left,” towards where I’m originally coming from. 
Nice meeting y’all!\n\n#### From the perspective of your role, what’s GitLab doing better than anyone else in terms of security?\nWhen you consider where GitLab is in its evolution, the size and diversity of the security department demonstrates very clearly that security is not an afterthought here. I love being able to focus on my area of expertise and collaborating with teams that are equally well-staffed and dedicated. Initiatives like Zero Trust and the in-house Red Team also show a proactive attitude towards security, rather than just patching the latest vulnerabilities.\n\n#### What is the most significant piece of security advice you could provide to a colleague or friend?\nUse a password manager and generate unique passwords for everything. That way one website losing your data will not put all your other accounts at risk. Keep your systems updated, so you don’t get bitten by security holes that are years old. Ok, that was two pieces of advice.\n\n#### What do you look forward to the most in security in the next five years?\nI’m anxious to see the industry overcome the dichotomy of security and usability, and secure-by-default becoming the new normal. This might take longer than five years, though.\n\n#### Is there an area of security research you think deserves more attention?\nThe design of decentralized systems that are secure and usable should receive more attention. When we read about the latest mind-boggling data breach, we often overlook the fact that bad operational security may be one cause, but another is the practice of piling up mountains of data in the first place.\n\n#### What is something you advocate as a security professional, but find the most difficult to put into practice personally?\nApplying the “principle of least privilege” is more difficult than I’d like. 
Giving a user or service only the required amount of permissions for certain tasks is intuitively a sensible strategy, yet doing this in practice is often hampered by obscure systems or documentation. I have yet to encounter a cloud provider with a permission system that is flexible, easy to use and well documented at the same time. It’s no surprise that software engineers tend to take the shortcut of overly broad permissions in this situation, I’m afraid.\n\n#### What's your favorite security research paper or thought leadership piece?\nI’m going to pick “[Tor: The Second-Generation Onion Router](https://svn.torproject.org/svn/projects/design-paper/tor-design.pdf),” because it lays the foundation for a system that provides accessible, secure communication for everyone to this day.\n\n## Now, for the questions you *really* want to have answered:\n\n#### Vim or Emacs?\nVim, because I have at least basic proficiency here. I might dive into Emacs though, if I ever get tired of Linux.\n\n#### Favorite Linux distro?\nI’ve been very happy with Ubuntu, both on the server and desktop, even if they are occasionally a bit “ambitious” with their changes.\n\n#### Is a hotdog a sandwich?\nNo, the geometry is all wrong.\n\n#### Gif or Gif? (Gif or Jif?)\nI’m more concerned about people who pronounce “router” incorrectly, to be honest.\n\n#### What's been your most interesting experience while traveling?\nOther than simply encountering a new place, probably tasting dishes and/or drinks I didn’t know before. When we went to New Orleans for [GitLab Contribute](/blog/contribute-wrap-up/), I was introduced to a whole range of Cajun cuisine that I had never had, which was amazing.\n\n#### What is one food or beverage you can't live without?\nDarjeeling tea\n\n#### Who is your favorite superhero and why?\nAnyone who comes to the aid of their fellow human beings, even at great personal risk.\n\n#### Have a question to ask the GitLab Security team? 
Leave a comment below!\n",[810,9,720,720],{"slug":1015,"featured":6,"template":680},"ask-gitlab-security-alexander-dietrich","content:en-us:blog:ask-gitlab-security-alexander-dietrich.yml","Ask Gitlab Security Alexander Dietrich","en-us/blog/ask-gitlab-security-alexander-dietrich.yml","en-us/blog/ask-gitlab-security-alexander-dietrich",{"_path":1021,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1022,"content":1027,"config":1032,"_id":1034,"_type":14,"title":1035,"_source":16,"_file":1036,"_stem":1037,"_extension":19},"/en-us/blog/ask-gitlab-security-paul-harrison",{"title":1023,"description":1024,"ogTitle":1023,"ogDescription":1024,"noIndex":6,"ogImage":1005,"ogUrl":1025,"ogSiteName":667,"ogType":668,"canonicalUrls":1025,"schema":1026},"Ask GitLab Security: Paul Harrison","What’s it like working to secure one of the most transparent organizations in the world? Meet our security team.","https://about.gitlab.com/blog/ask-gitlab-security-paul-harrison","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ask GitLab Security: Paul Harrison\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2019-05-31\",\n      }",{"title":1023,"description":1024,"authors":1028,"heroImage":1005,"date":1029,"body":1030,"category":720,"tags":1031},[1010],"2019-05-31","\nWhat’s it like working to secure one of the most transparent organizations in the world?  To be a security practitioner in a highly iterative and agile environment? What does that look like and what kind of people thrive in that environment? It takes a certain individual ... curious, analytical, collaborative and dedicated. Of course, there’s more than meets the eye when it comes to our GitLab Security team; they also tackle the hard topics like the age-old 'Is a hotdog a sandwich?' 
debate, Vim vs Emacs, and Linux distros.\n{: .note}\n\nWe take securing the GitLab product and service and protecting our company very seriously. But, we try not to take ourselves too seriously. We hope you learn something new in this series, but that you enjoy yourself too.\n{: .note}\n\n\n![Paul Harrison Headshot](https://about.gitlab.com/images/blogimages/paul-harrison-headshot.png){: .small.right.wrap-text} **Name:** Paul Harrison\n\n**Title:** Senior Security Engineer / Interim Security Manager, Security Operations\n\n**How long have you been at GitLab?** I started in January 2019\n\n**GitLab handle:** [@pharrison](https://gitlab.com/pharrison)\n{: #tanuki-orange}\n\n**Connect with Paul:** [LinkedIn](https://www.linkedin.com/in/pharrison33) / [Twitter](https://twitter.com/iyampaul)\n\n\n#### Tell us what you do here at GitLab:\nI’m responsible for defining and implementing the operational security response processes and procedures to handle new and emerging risks to GitLab the company, the product, and GitLab.com. I’m also involved in day-to-day security event handling and engaging with partner teams around GitLab on any related questions or issues.\n\n#### What’s the most challenging or rewarding aspect of your role?\nThe most challenging AND rewarding aspect is helping to design our security posture and working to meet those goals one step at a time. This is incredibly unique and challenging as we’re 100 percent remote, the topography of the company and its environments is constantly iterating, and we want to ensure we hold true to our [values](https://handbook.gitlab.com/handbook/values/) by being as transparent and open as possible.\n\n#### And, what are the top 2-3 initiatives you’re currently focused on?\n- Operational Security Architecture: Designing the end-to-end flow of how security risks, events, and incidents are handled across GitLab. 
(Handbook MR coming soon!)\n- [Log Aggregation Working Group](/company/team/structure/working-groups/log-aggregation/): Analyzing, documenting, and working with Infrastructure and Development teams to improve the quality and efficiency of logs being produced by GitLab-CE/EE and GitLab.com.\n\n#### How did you get into security?\nDialing into local [BBS](https://en.wikipedia.org/wiki/Bulletin_board_system)s in the early '90s, IRC in the mid-90s, and being introduced to reading material like Phrack, 2600, and other amusing bits at an early and malleable age. Combined with a general interest in discovering how things work, breaking them in the process, and the kind of interesting things you can find!\n\n#### In the past decade, how has your area of expertise changed?\n10 years ago I was almost entirely focused on the security and compliance tools necessary to keep a solid grasp on enterprise email (well ... as best as you can!). From there, I broadened my horizons by taking on security management and architecture of local and remotely hosted environments, then compliance for interesting and terrifying acronyms like GDPR. This has resulted in a decent breadth of knowledge in many areas … and enough to be dangerous in others.\n\n#### What is the most significant piece of security advice you could provide to a colleague or friend?\nPlease, please, please, please use a password manager like 1Password, or LastPass, or Bitwarden (examples, not endorsements, YMMV and pick what fits your workflow best!) and start using it to generate and save unique and difficult passwords for each of your sites or services. You won’t need to remember them and so you don’t need to use a memorable one. Then, while you’re at it, turn on two-factor authentication (2FA), and not that SMS/text message-based one. Use an app like Google Authenticator or Microsoft Authenticator, which will give you the six-digit number (aka Time-Based, One-Time Password) on your mobile device, or better. 
Having strong, unique passwords and 2FA enabled will significantly decrease the chance of your accounts being compromised.\n\n#### GitLab is very unique in that we strive to be incredibly transparent...about everything. What sort of challenges does that present to you as a security professional? What opportunities?\n**_First, the opportunities:_**\n\nStriving, and for the vast majority of situations, succeeding, at being transparent is a hugely rewarding and helpful experience for both GitLab and the community. At first I was sceptical and from working with very tight-lipped organizations with their well-massaged disclosure communications, my mindset has been to not “air our dirty laundry.” **But, being able to be transparent about vulnerabilities and issues means:**\n- The community can see how we became aware of, handled, and resolved the situation, then subsequently learned from it so we won’t repeat the issue.  This information might help them in their own environment, or their own processes, and, we hope, might also increase their trust in our product!\n- We can give credit to all the awesome, hard-working, and talented people at GitLab who come up with clever and creative solutions to protect our customers and their data. When the issues are public, anyone can see who worked it, came up with the obscure and amazing solution, and deployed it. In most companies this information is part of their secret sauce, but this is something we can, should, and do celebrate.\n\n**_Now, the challenges:_**\n- The need to keep vulnerability details close so our customers have the opportunity to update before it’s being exploited in the wild.\n- Old habits die hard, particularly in the Security community. 
When the default state in most companies is to lock away and carefully communicate a well-crafted and rehearsed statement, transparency is something to get used to and can be uncomfortable for many people who’ve been in the industry for a while.\n- Determining, and sticking with, the few rare scenarios where, due to the sensitive nature, it is necessary to keep certain data confidential.  Scope creep can be hard.\n\n#### What are your thoughts on cybersecurity bachelor’s degrees as a way to scale training of security professionals?\nWith the premise of a bachelor’s degree being more focused on providing the deep, foundational knowledge and enabling people to continue to learn after completing the degree, a Security-focused bachelor’s program could be valuable. However, the continued learning aspect is absolutely a necessity in this space as, despite [OWASP Top 10](https://www.owasp.org/index.php/Category:OWASP_Top_Ten_Project) (for example) having largely remained unchanged, the rest of the security landscape has shifted tremendously in the last decade. Without having the willingness to grow and the tools at your disposal to understand how to grow, you would have a difficult time.\n\n## Now, for the questions you *really* want to have answered:\n\n#### Favorite Linux distro?\nDebian, specifically Debian Stable. It just works. Fast and reliable for server use and great for a desktop/workstation. I’ve been using Debian since version 5 or 6 and it is always my first choice when setting up a new system.\n\n#### You get one superpower, what is it?\nI’d like to be able to look at any one plant and make it to grow at any speed and to any size I wish. I could make one beanstalk be 100 feet tall and 3 feet wide, or a fully formed spruce tree but scaled down to a foot, all in a matter of seconds!\n\n#### Now, it’s time for the age-old debate: Is a hotdog a sandwich? And, on that note, is a taco a sandwich?\nNeither a hotdog nor a taco are sandwiches! 
A sandwich is formed by bringing together two distinct pieces of something to hold an object or several objects between them, sandwiched between them one could say! A hotdog or taco are different from a sandwich because in both circumstances the hotdog itself (aka meat-tube) or the taco fillings are inserted into a crevice formed from a single continuous piece of something, which is no longer sandwiching anything but instead is actually formed to enable the holding of the hotdog or taco-fillings.\n\n#### Have a question to ask the GitLab Security team? Leave a comment below!\n",[810,9,720,720],{"slug":1033,"featured":6,"template":680},"ask-gitlab-security-paul-harrison","content:en-us:blog:ask-gitlab-security-paul-harrison.yml","Ask Gitlab Security Paul Harrison","en-us/blog/ask-gitlab-security-paul-harrison.yml","en-us/blog/ask-gitlab-security-paul-harrison",{"_path":1039,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1040,"content":1045,"config":1050,"_id":1052,"_type":14,"title":1053,"_source":16,"_file":1054,"_stem":1055,"_extension":19},"/en-us/blog/ask-gitlab-security-roger-ostrander",{"title":1041,"description":1042,"ogTitle":1041,"ogDescription":1042,"noIndex":6,"ogImage":1005,"ogUrl":1043,"ogSiteName":667,"ogType":668,"canonicalUrls":1043,"schema":1044},"Ask GitLab Security: Roger Ostrander","What’s it like working day and night to kill spam, Bitcoin mining, malware and more? 
Meet our security team.","https://about.gitlab.com/blog/ask-gitlab-security-roger-ostrander","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ask GitLab Security: Roger Ostrander\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2019-06-14\",\n      }",{"title":1041,"description":1042,"authors":1046,"heroImage":1005,"date":1047,"body":1048,"category":720,"tags":1049},[1010],"2019-06-14","\nWhat’s it like working to secure one of the most transparent organizations in the world?  To be a security practitioner in a highly iterative and agile environment? What does that look like and what kind of people thrive in that environment? It takes a certain individual... curious, analytical, collaborative and dedicated. Of course, there’s more than meets the eye when it comes to our GitLab Security team; they also tackle the hard topics like the age-old 'Is a hotdog a sandwich?' debate, Vim vs Emacs, and Linux distros.\n{: .note}\n\nWe take securing the GitLab product and service and protecting our company very seriously. But, we try not to take ourselves too seriously. We hope you learn something new in this series, but that you enjoy yourself too.\n{: .note}\n\n\n![Roger Ostrander Headshot](https://about.gitlab.com/images/blogimages/rostrander.png){: .small.right.wrap-text} **Name:** Roger Ostrander\n\n**Title:** Senior security engineer, [Abuse Operations](/handbook/security/#abuse-operations)\n\n**How long have you been at GitLab?** I started on Mar. 26, 2018\n\n**GitLab handle:** [@rostrander](https://gitlab.com/rostrander)\n{: #tanuki-orange}\n\n**Connect with Roger:** [LinkedIn](https://www.linkedin.com/in/roger-ostrander-835848ba/) / [Twitter](https://twitter.com/atiaxi)\n\n\n#### Tell us what you do here at GitLab:\nI kill spam, I kill Bitcoin mining, I kill phishing and malware. If it’s bad, I kill it. 
And this isn’t just removal; I create automated tools that let us detect all these things and stop them ahead of time.\n\n#### What’s the most challenging or rewarding aspect of your role?\nI’m up against everyone in the world out to make a quick buck by spamming, which means it’s an arms race. I improve my detection, they find another way in, I detect that, they respond, etc. It’s definitely a challenge, and the reward is, of course, when I get to just completely wipe out a ton of spam or prevent it from being created in the first place. Who hasn’t seen spam and thought to themselves, “I wish I could do something about that right now”? I can!\n\n#### And, what are the top 2-3 initiatives you’re currently focused on?\n- Snippet spam is currently a big problem, where people will put spammy search terms in our snippets hoping that search engines will pick up on them.  Recently, there was an [API change](https://gitlab.com/gitlab-org/gitlab-ce/issues/41888) that’ll make that a lot easier to deal with.\n\n- Similarly, people create groups with names like “Watch this free movie online,” which are not only spam but also tend to be vectors for malware. So anyone who searches for “watch \u003Cpopular movie here> free” hoping to pirate a film instead gets a link to a big, heaping pile of keylogging. Keeping on top of that is an ongoing priority, because of course there’s plenty of money to be made by taking over someone’s computer.\n\n#### How did you get into security?\nIt started when I interviewed at Reddit nearly a decade ago – at first, I thought it was for an ordinary backend web development position. Then, halfway through the interview when I was talking to the CEO, he said “We’re actually more interested in your machine learning background, to fight spam.” Ever been in a job interview when you realize you’ve been interviewing for the wrong thing the entire time? 
I had to change gears pretty quickly, but it worked and I’ve been busting up bad actors ever since.\n\n#### In the past decade, how has your area of expertise changed?\nQuite a bit – I started out as a general backend web programmer, got into machine learning and spamfighting, where I learned quite a bit of frontend technology – even if I am terrible at the actual design work that generally accompanies that.\n\nBut the biggest influence on the breadth of my expertise came from a job that wasn’t actually a programming job at all: It was a role as a solutions architect for a NoSQL database company. If that sounds strangely vague to you, then you have some idea of what I did. It was a technical sales position where I’d fly on-site to various customers (some of them household names) and help them set up our product. The sheer amount of “big picture” experience I got from that was invaluable. When you’re programming, it’s very easy to fall into the specific area that you’re working on. Even if you do have backend experience, it’s hard to get a full idea of how something’s rolled out across a whole company, possibly worldwide. So, the sheer scope of that role allowed me to get a more complete view of how an entire system ought to work at the largest possible scale.\n\n#### Tell us about a time when you failed professionally. How did you recover and what did you learn?\nI once banned the entire front page of Reddit. We were dealing with issues similar to what I’m dealing with today, the “watch free movies” kind of spam/malware scenario I described above, and I’d noticed a pattern: Spammers would create a subreddit of their own and populate it with spam, for SEO purposes. So I created a processing script to find that behavior and made a list of all the subreddits they’d posted in and naively assumed they’d only posted to their own. I made a list, but it had several hundred items on it, so I spot checked them and it seemed everything was okay.\n\nSurprise, it wasn’t! 
They’d posted in pretty much every popular subreddit, meaning my script banned high-profile, high-traffic subreddits. Also this was during the company’s all-hands so every single person in the company was asking “What happened to movies?” My response, of course, was a very calm, “I’M WORKING ON IT!” What I learned from that one was to fully check my results instead of simply spot checking, and that keeping logs of what your destructive scripts have done is mandatory.\n\n#### GitLab is very unique in that we strive to be incredibly transparent... about everything. What sort of challenges does that present to you as a security professional? What opportunities?\nThis is an enormous issue for me, because while I am in the security department my area of expertise is anti-abuse. So, for example, if a vulnerability is reported to security, it won’t initially be public. Generally it’s only made public while it’s fixed. But when I come up with a new tactic for fighting spam… I don’t ever want that to be public! Even, perhaps especially, if it’s a simple tactic. If spammers knew specifically how they were being detected, they’d change their behavior accordingly. So it’s a very difficult balancing act.\n\nEven so, there are opportunities – the snippets API feature, for example, came about because someone outside of GitLab requested it. They wanted to use it for anti-spam purposes just like I did, but the API doesn’t do any spam checking on its own. So that got to be developed in full view with all the benefits transparent development brings, but without giving away any secrets.\n\n## Now, for the questions you *really* want to have answered:\n\n#### Vim or Emacs?\nI learned VI(M) long ago as a practical necessity, and I highly recommend it. Every Unix system everywhere is going to have at least VI on it as a minimum, so if you know how to work with that then you can get something done no matter where you are. 
Emacs used to be my go-to “IDE”-type editor, but nowadays I generally use more specialized IDEs.\n\n#### Is a hotdog a sandwich?\nMy wife works for the USDA, so she has opinions on this. Legally backed opinions, as it happens. Frankfurters are specifically quoted in policy as a “sandwich type product.” [Citation: United States Department of Agriculture, \"Food Standards and Labeling Policy Book.\"](https://www.fsis.usda.gov/wps/wcm/connect/7c48be3e-e516-4ccf-a2d5-b95a128f04ae/Labeling-Policy-Book.pdf?MOD=AJPERES) And I’m wise enough to agree with my wife. And also the law, I guess.\n\n#### Is a taco a sandwich?\nA taco is just a tacoid in the category of endofoodtors. What’s the problem?\n\n#### Gif or Gif? (Gif or Jif?)\nLook at that pronunciation guide right there in the question. One of those is spelled exactly like the word whose pronunciation is being debated. Just saying.\n\n#### Have a question to ask the GitLab Security team? Leave a comment below!\n",[810,9,720,720],{"slug":1051,"featured":6,"template":680},"ask-gitlab-security-roger-ostrander","content:en-us:blog:ask-gitlab-security-roger-ostrander.yml","Ask Gitlab Security Roger Ostrander","en-us/blog/ask-gitlab-security-roger-ostrander.yml","en-us/blog/ask-gitlab-security-roger-ostrander",{"_path":1057,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1058,"content":1064,"config":1070,"_id":1072,"_type":14,"title":1073,"_source":16,"_file":1074,"_stem":1075,"_extension":19},"/en-us/blog/auto-devops",{"title":1059,"description":1060,"ogTitle":1059,"ogDescription":1060,"noIndex":6,"ogImage":1061,"ogUrl":1062,"ogSiteName":667,"ogType":668,"canonicalUrls":1062,"schema":1063},"What's coming for Auto DevOps","We're working on a number of improvements to GitLab Auto DevOps – here's where it's at and where it's headed.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667050/Blog/Hero%20Images/auto-devops-pipeline-stages.png","https://about.gitlab.com/blog/auto-devops","\n                       
 {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What's coming for Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Ward\"}],\n        \"datePublished\": \"2020-04-30\",\n      }",{"title":1059,"description":1060,"authors":1065,"heroImage":1061,"date":1067,"body":1068,"category":675,"tags":1069},[1066],"Chris Ward","2020-04-30","[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is designed to make CI/CD adoption easier, with baked-in best practices and automation to take care of moving your code seamlessly through the software development lifecycle. If you or your team are new to DevOps, this is a great place to start. We're excited to share some new and [upcoming improvements to Auto DevOps](#coming-soon), but first: \n\nThere is a prerequisite for Auto DevOps, and that's a Kubernetes cluster. This may or may not be an easy step for you to complete, but your team likely has a cluster set up already. If not, [read our getting started guide](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html).\n\nAuto DevOps should be enabled by default, but if it isn't, go to _Settings > CI/CD > Auto DevOps_ and check _Default to Auto DevOps pipeline_. There are a lot of automated stages available, depending on what version and tier of GitLab you use, and which components you add to your Kubernetes cluster.\n\n1.  **Auto Build**: Builds your code using a _Dockerfile_ if your project has one, or a [Heroku buildpack](https://elements.heroku.com/buildpacks) selected based on the programming language you use, but you can manually set it.\n2.  **Auto Test**: Runs any tests included in your codebase, again using a Heroku buildpack.\n3.  **Auto Code Quality**: Runs static analysis and other checks over your code using the [code quality image](https://gitlab.com/gitlab-org/ci-cd/codequality).\n4.  
**Auto SAST (Static Application Security Testing)**: Runs static analysis checks focussed on security issues using the [SAST image](https://gitlab.com/gitlab-org/security-products/sast).\n5.  **Auto Dependency Scanning**: Checks for potential security issues on project dependencies using the [dependency scanning image](https://gitlab.com/gitlab-org/security-products/dependency-scanning). \n6.  **Auto License Compliance**: Searches project dependencies for what licenses they use, using the [license compliance image](https://gitlab.com/gitlab-org/security-products/license-management).\n7.  **Auto Container Scanning**: Uses [Clair](https://github.com/quay/clair) to run static analysis and security issue checks on any Docker images used. \n8.  **Auto Review Apps**: Creates a version of an application in a temporary environment for team members to try and review.\n9.  **Auto DAST (Dynamic Application Security Testing)**: Runs further security checks using the [OWASP ZAProxy](https://github.com/zaproxy/zaproxy) tool.\n10. **Auto Deploy**: Deploys an application to a production environment as defined in the Kubernetes environment settings.\n11. **Auto Browser Performance Testing**: Tests the performance of application web pages using the [Sitespeed.io image](https://hub.docker.com/r/sitespeedio/sitespeed.io/).\n12. **Auto Monitoring**: Uses Prometheus to monitor system metrics for a deployed application.\n\n### Recent improvement: Readiness for Kubernetes 1.16 ([#32720](https://gitlab.com/gitlab-org/gitlab/issues/32720))\n\nWe recently reworked Auto DevOps features to [match changes in the Kubernetes 1.16 API](/releases/2020/03/22/gitlab-12-9-released/#auto-devops'-default-postgresql-due-to-change). 
Nothing you use will change, but behind the scenes, we will access different API endpoints, and in different ways.\n\n## Coming soon\n\nSeveral improvements are coming to Auto DevOps in our next few releases to ensure that we help your projects conform to the latest DevOps best practices, and integrate with as many of our platform features and external tools as possible.\n\n### Cloud-native buildpacks for Auto DevOps ([#25954](https://gitlab.com/gitlab-org/gitlab/issues/25954))\n\nSince Heroku created the buildpack concept in 2011 when using virtual machines was typical, others have adopted the concept, and created their own that suited containers better. This change in need resulted in the Cloud Native Computing Foundation (CNCF) accepting the [Cloud Native Buildpacks project](https://buildpacks.io/) in 2018 to maintain a standard for buildpacks that suits their modern use cases. Also, in 12.10 we've added support for Cloud Native Buildpacks, and will be switching our \"traditional\" Heroku buildpacks to these newer ones in the coming months.\n\n### Running Auto DevOps on air-gapped networks ([#25642](https://gitlab.com/gitlab-org/gitlab/issues/25642))\n\nWhile many of our users have their clusters connected to the internet, we know not all do, and want to offer these customers as many features as possible. As part of GitLab 13.0, we are researching how to give you the ability to configure the locations of dependencies for Auto DevOps stages.\n\n### Upgrade to Helm 3 ([#29038](https://gitlab.com/gitlab-org/gitlab/issues/29038))\n\nWe use [Helm](https://helm.sh/) to deploy packages needed for various stages of the Auto DevOps process. In 13.1 we will upgrade Helm to version 3, which brought a series of significant changes, including removing Tiller as the \"server\" side of Helm.\n\n### NGINX alerts to auto-monitoring in Auto DevOps ([#118788](https://gitlab.com/gitlab-org/gitlab/issues/118788))\n\nNginx is a popular HTTP and reverse proxy server. 
In 13.0 we will add support for the metrics it exposes to Prometheus for providing alerts to our auto-monitoring feature.\n\n### Add Merge Train support to Auto DevOps ([#121933](https://gitlab.com/gitlab-org/gitlab/issues/121933))\n\n[Merge Trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) are a GitLab feature that let you queue lists of merge requests waiting for merging into a target branch. Auto DevOps doesn't currently support merge trains, but in version 13.1, we will start adding support and helping users get the configuration they need to add their Merge Trains to Auto DevOps workflows.\n\nYou can [read more about merge trains here](/blog/all-aboard-merge-trains/).\n\n## Looking further ahead\n\nThese planned features aside, one other area we are looking to improve is adopting more of a Directed Acyclic Graph (DAG) approach to Auto DevOps pipelines. You will no longer have to wait for one stage to complete before another begins, and you can focus on the results of the stages important to you. 
Feel free to view and comment on [the open issue](https://gitlab.com/gitlab-org/gitlab/issues/33200).\n\nWe are broadly working to make Auto DevOps work seamlessly with as many other GitLab features as possible, and hope you enjoy the time and insights it gives you.\n\nYou can [read more about Auto DevOps here](/blog/auto-devops-explained/).\n",[109,677,9],{"slug":1071,"featured":6,"template":680},"auto-devops","content:en-us:blog:auto-devops.yml","Auto Devops","en-us/blog/auto-devops.yml","en-us/blog/auto-devops",{"_path":1077,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1078,"content":1084,"config":1092,"_id":1094,"_type":14,"title":1095,"_source":16,"_file":1096,"_stem":1097,"_extension":19},"/en-us/blog/aws-reinvent-recap",{"title":1079,"description":1080,"ogTitle":1079,"ogDescription":1080,"noIndex":6,"ogImage":1081,"ogUrl":1082,"ogSiteName":667,"ogType":668,"canonicalUrls":1082,"schema":1083},"Highlights from AWS re:Invent 2018","Catch up on what GitLab got up to at AWS re:Invent last week! Reinventing pipelines, emerging as a single application, theCUBE interviews, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679994/Blog/Hero%20Images/aws_booth_2018.jpg","https://about.gitlab.com/blog/aws-reinvent-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Highlights from AWS re:Invent 2018\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2018-12-06\",\n      }",{"title":1079,"description":1080,"authors":1085,"heroImage":1081,"date":1087,"body":1088,"category":299,"tags":1089},[1086],"Priyanka Sharma","2018-12-06","\n\nLast week GitLab was at AWS re:Invent 2018, the marquee event for cloud computing in the US. As the frontrunner in the space, Amazon has built re:Invent to be a juggernaut. This year it commanded most of the Las Vegas strip and had over 50,000 attendees. 
As a first-time visitor myself, I was impressed by the sheer scale and efficiency of the event. I was also thrilled to achieve my personal goal of giving my first talk with a live demo using code and GitLab. As for GitLab, we saw that our company emerged as a leader in the DevOps space with a single application for the whole software development lifecycle.\n\n## Highlights\n\n### Reinventing CI/CD pipelines\n\nOur CEO [Sid Sijbrandij](/company/team/#sytses) and I did a talk and live demo about reinventing CI/CD pipelines using GitLab, [Kubernetes](/solutions/kubernetes/), and EKS. This was our first hint that this re:Invent was going to be special. The talk was bursting at the seams with attendees, as we shared both the challenges of the toolchain crisis engulfing our ecosystem, and about how a single application for the entire DevOps lifecycle can make an improvement of over 200 percent in cycle times. You can [check out the presentation here](https://docs.google.com/presentation/d/1x1g4pfpoaav9lhcYkjAJylLMl-9S0JFTeKXlNF98O-I/edit?usp=sharing).\n\n![Sid Sijbrandij and Priyanka Sharma on stage at AWS re:Invent](https://about.gitlab.com/images/blogimages/aws-2018/aws_2018_sid_talk_stage.jpeg){: .shadow.medium.center}\n\nThe demo, which showed us running a CI/CD pipeline and deploying code to Kubernetes on EKS, is an example of the [cloud native workflows](/topics/cloud-native/) users can push via GitLab. It is such competency that makes Kubernetes on EKS a breeze and is the reason GitLab was awarded the [AWS Partner DevOps Competency Certification](/blog/gitlab-achieves-aws-devops-competency-certification/) to confirm our viability and excellence as a DevOps solution for companies using AWS Cloud.\n\n### Validation for our vision\n\nOur experience at re:Invent was one of validation and emergence. As a company, we saw that our efforts to build the first single application for the entire DevOps lifecycle have paid off and our users resonated with our message. 
Most folks who came to our booth were aware that GitLab played a part in multiple stages (if not all) of their workflow and many were avid [GitLab CI](/features/continuous-integration/) fans. Gone are the days when [version control](https://docs.gitlab.com/ee/topics/gitlab_flow.html) was the only thing GitLab was associated with.\n\n![Collage from GitLab at AWS re:Invent](https://about.gitlab.com/images/blogimages/aws-2018/aws_booth_collage.jpeg){: .medium.center}\n\nOur VP of Alliances, [Brandon Jung](/company/team/#brandoncjung), [appeared on theCUBE](https://www.youtube.com/watch?v=Ejs5xGAhL8s) with a company called Beacon. As the former head of partnerships at Google Cloud, Brandon has a long history with GitLab. He has seen the company grow over the years and shared how our rocketship ascent across the DevOps lifecycle convinced him of the potential. He said, \"In just over two years, [GitLab became the frontrunner for continuous integration](/blog/gitlab-leader-continuous-integration-forrester-wave/), according to Forrester. That's impressive.\"\n\n### Livestream with The New Stack\n\nI also represented GitLab on [a livestream podcast](https://www.pscp.tv/w/1eaJbODAepnxX) with [The New Stack](https://thenewstack.io/), [Matt Biilmann](https://twitter.com/biilmann?lang=en), CEO of [Netlify](/blog/netlify-launches-gitlab-support/), and [Joe Beda](https://twitter.com/jbeda), founder of [Heptio](https://heptio.com/) and creator of Kubernetes. We discussed GitOps, NoOps, and the toolchain crisis. As Matt wisely said, \"Trust in open source is critical to cloud computing and the ecosystem. Companies like GitLab will keep the players honest.\"\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">GitOps, NoOps and the tool chain crisis. 
\u003Ca href=\"https://t.co/mtfm8OaYYD\">https://t.co/mtfm8OaYYD\u003C/a>\u003C/p>&mdash; The New Stack (@thenewstack) \u003Ca href=\"https://twitter.com/thenewstack/status/1067881587214184448?ref_src=twsrc%5Etfw\">November 28, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWe thank AWS for creating this amazing ecosystem of end users and practitioners who came together in Vegas last week. Next year will be bigger, better. Until then, see you all at [KubeCon](/events/)! 😃\n",[1090,267,993,277,675,1091,9,745],"CI","kubernetes",{"slug":1093,"featured":6,"template":680},"aws-reinvent-recap","content:en-us:blog:aws-reinvent-recap.yml","Aws Reinvent Recap","en-us/blog/aws-reinvent-recap.yml","en-us/blog/aws-reinvent-recap",{"_path":1099,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1100,"content":1106,"config":1112,"_id":1114,"_type":14,"title":1115,"_source":16,"_file":1116,"_stem":1117,"_extension":19},"/en-us/blog/balanced-piaa",{"title":1101,"description":1102,"ogTitle":1101,"ogDescription":1102,"noIndex":6,"ogImage":1103,"ogUrl":1104,"ogSiteName":667,"ogType":668,"canonicalUrls":1104,"schema":1105},"Announcing a more balanced Proprietary Information and Assignment Agreement","We've amended our PIAA to help our contributors maintain their ability to work on projects that are unrelated to GitLab business, including other open source projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670464/Blog/Hero%20Images/gitlab-loves-open-source.jpg","https://about.gitlab.com/blog/balanced-piaa","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing a more balanced Proprietary Information and Assignment Agreement\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jamie Hurewitz\"}],\n        \"datePublished\": \"2017-12-18\",\n      
}",{"title":1101,"description":1102,"authors":1107,"heroImage":1103,"date":1109,"body":1110,"category":299,"tags":1111},[1108],"Jamie Hurewitz","2017-12-18","\n\nWe recently [switched from a Contributor License Agreement (CLA) to a Developer's Certificate of\nOrigin (DCO)](/blog/gitlab-switches-to-dco-license/)\nto make it easier for everyone to contribute to GitLab. Now, we're taking our commitment to\nour core tenet, \"everyone can contribute,\" a step further. We're amending our Proprietary\nInformation and Assignment Agreement (PIAA) and putting clarifying processes in\nplace to help our contributors maintain their ability to work on projects that\nare unrelated to GitLab's business, including other open source projects.\n\n\u003C!-- more -->\n\nGitHub [announced the Balanced Employee Intellectual Property Agreement](https://github.com/blog/2337-work-life-balance-in-employee-intellectual-property-agreements)\n(BEIPA), an open source intellectual property (IP) agreement which seeks to take\na more balanced approach to assigning control over IP. We want to\nthank GitHub for taking the lead on a very important conversation. Their new\napproach inspired us to take a closer look at our own PIAA, make improvements to better clarify our\nposition, and encourage our contributors to work on projects outside of GitLab if they want to.\n\nWe [recently launched a Twitter poll](https://twitter.com/gitlab/status/938921270913019904)\nto assess the potential risk IP agreements pose to developers in our community.\nWe found that the majority of developers (85 percent) have a side project and nearly half\n(44 percent) have worried about the IP ownership of that project. 
Forty-four percent\nsay they have used company resources for a side project, potentially putting them\nat risk of violating their workplace IP agreement.\n\nAt GitLab, we want to give our contributors confidence that their developments\nwill *not* be owned by GitLab simply by virtue of their use of GitLab-issued computers,\nGitLab facilities, or the GitLab source code repository. Furthermore, we want to\nalleviate stress of not knowing whether they are in violation, given that there\nis necessarily some ambiguity about which projects relate to or don't relate to\nour business. So, we are making some changes.\n\nOne of [our values is boring solutions](https://handbook.gitlab.com/handbook/values/#efficiency).\nWith this in mind we looked at either adopting the BEIPA outright or contributing\nto the document. After considerable thought we concluded that it wasn’t possible\nto make either of these approaches work. Accordingly, we focused on improving our\nexisting PIAA.\n\n## Why the change?\n\nThe industry standard for intellectual property agreements tends to assign a broad\nswath of IP to the employer, making it difficult for a contributor to work on\noutside projects without being in violation of the agreement. The most important\npiece of any employee agreement is the definition of what IP is assigned from the\nemployee to the company.\n\nThe industry standard is to define the scope of the IP definition in three buckets:\n\n1. IP that relates to the current or prospective business of the company\n2. IP created by the employee as part of its work for the company\n3. IP created using materials, facilities, funding, or confidential information of the company\n\nWe want to alleviate the unnecessary risks posed to contributors by buckets 1 and 3 above.\n\n## What's changing\n\nAs a result of our internal review, we are making three important changes to our PIAA\nand processes related to outside creations developed by our contributors:\n\n1. 
We have entirely eliminated the section in our PIAA that would grant GitLab ownership\nin developments simply by virtue of the use of GitLab equipment, including\nGitLab-issued computers, GitLab facilities, or GitLab.com as a software\ndevelopment platform.\n\n2. In the event there is concern on our contributor’s behalf that there may be a gray\narea, we have created a process whereby GitLab can confirm that the development is\noutside the scope of GitLab’s business.\n\n3. We have [added plain language text to our publicly viewable Handbook](/handbook/people-group/contracts-probation-periods/#approval-for-outside-projects) that clarifies\nwhen contributors should seek further assurances from GitLab and when\nthey shouldn’t.  \n\nOur goal is to give contributors a way to gain confidence in their ability to pursue\nindependent projects ahead of time, and reduce the risk of potential conflicts down the line.\n",[675,745,9],{"slug":1113,"featured":6,"template":680},"balanced-piaa","content:en-us:blog:balanced-piaa.yml","Balanced Piaa","en-us/blog/balanced-piaa.yml","en-us/blog/balanced-piaa",{"_path":1119,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1120,"content":1126,"config":1132,"_id":1134,"_type":14,"title":1135,"_source":16,"_file":1136,"_stem":1137,"_extension":19},"/en-us/blog/balancing-career-and-baby",{"title":1121,"description":1122,"ogTitle":1121,"ogDescription":1122,"noIndex":6,"ogImage":1123,"ogUrl":1124,"ogSiteName":667,"ogType":668,"canonicalUrls":1124,"schema":1125},"How I balance a baby, a career at GitLab, and cultural expectations of motherhood","One team member shares her experience as a new working mother at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673071/Blog/Hero%20Images/parental-leave-global.jpg","https://about.gitlab.com/blog/balancing-career-and-baby","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How I balance a 
baby, a career at GitLab, and cultural expectations of motherhood\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jarka Košanová et al\"}],\n        \"datePublished\": \"2019-07-25\",\n      }",{"title":1121,"description":1122,"authors":1127,"heroImage":1123,"date":1129,"body":1130,"category":808,"tags":1131},[1128],"Jarka Košanová et al","2019-07-25","\n_This is the second in a four-part series looking at a myriad of issues surrounding working at home with children. In [part one we took an in-depth look at parental leave policies worldwide](/blog/how-is-it-being-a-new-mom-working-for-gitlab/) and in parts three and four we’ll discover tried-and-true strategies for working remotely with older children._\n\nIn my last post I talked about the big differences among countries when it comes to paid parental leave. But this is only a start. I think maybe even more important is how society sees the issues around parental leave. In my country, women who want to work during the first three years of their child's life are often called \"career chasers\" and considered selfish. The majority opinion is that as a woman, you should prioritize caring for your children and household until your children are at least three years old. A lot of people in the Czech Republic (and elsewhere) think you should give up your old hobbies, stop traveling, and wait to resume your life until your children are older.\n\nYoung people, especially those with higher education or international experience, are usually more tolerant and don't see parenting as so black and white anymore. But I still wondered: Can I work when I have a small baby and still be accepted in my country?\n\nI was sure I wanted to return to work quite soon after having a baby, meaning before the 2-3 years which is \"normal\" in the Czech Republic. I had lived in Switzerland where childminding groups took care of infants and toddlers and women often went back to work four months after birth (or even sooner). 
I couldn't imagine how I could stay at home with a child, or multiple children, for three or more years without working. I really like my job, so why should I have to get rid of it for three or more years? Why should I forget everything I have learned? But I had no idea how to balance social expectations and my desire to work at that time.\n\n## Flexibility is key\n\nAnd then at GitLab, balancing parenting and work came so naturally. This is perhaps because I was working remotely and that made it much easier. Twelve weeks of parental leave passed quickly. The first 8-10 weeks were crazy, but then it got easier. Whenever our little one was sleeping or playing I had time to work. I started working part time after 12 weeks and I am really happy I was granted this opportunity.\n\nWorking part time has been great for me so far. I am really grateful that working for GitLab offers such a flexible schedule. When our baby was about six months she started moving, but it was not really a problem. I just changed my schedule and I started working two full days and one half day when I have our parents arranged for babysitting (instead of the five half days I had worked before). I actually have rest from the baby while working and rest from work while taking care of the baby.\n\nIn all honesty, if I had the option for more than 12 weeks of parental leave, I would have taken it. I could have applied more leave maybe a bit later in my child's life, because any parent knows that it is a new challenge when a child starts moving. I also can't imagine starting to work full time after those first 12 weeks.\n\n## Still able to contribute\n\nI came to Cape Town with six-month-old Eliška in August 2018, where we had the [GitLab Summit](/blog/gitlab-summit-cape-town-recap/). I was a bit worried about how my husband and I would handle everything but it was amazing. 
We were able to join in on all excursions and my husband took over for Eliška most of the time so I could enjoy all the activities, including a session about working with kids productively.\n\nI realized that having a child doesn't have to change your life, even in a country where you're \"supposed\" to raise your child full time and not work. Clearly, giving life to a new human being has been a big change in my life. As her parents, my husband and I must support her development, keep her occupied, happy, and safe. But I realized that becoming a mother doesn't mean I have to give up my old life. I can continue working and progress with my career. I can keep my hobbies, such as sport (I just accomplished a half-marathon in Scotland), and my husband and I learned how easy it is to travel with a baby.\n\nAnd the opinion of the Czech society? I have friends and family around who support my decisions, and many say they admire me for continuing to work while raising my daughter. I am pretty sure there are still a lot of people who don't comprehend my decision, but the fact that I work from home for a [family first](https://handbook.gitlab.com/handbook/values/#family-and-friends-first-work-second) company makes my decisions more socially acceptable. My family is also fortunate to have grandparents that help us a lot. In my experience, the GitLab way is simply better for me and my family than the \"traditional Czech way.\" I am happy with how my work and family life is balanced.\n\nWhat do you think about parental/maternity leave around the world and in the US? 
How has it been working for you and are you happy with your way?\n\n_Next up in our series we look at the practical challenges of managing your physical space while working at home with children._\n\nPhoto by [insung yoon](https://unsplash.com/@insungyoon?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/baby-mobile?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[832,810,9],{"slug":1133,"featured":6,"template":680},"balancing-career-and-baby","content:en-us:blog:balancing-career-and-baby.yml","Balancing Career And Baby","en-us/blog/balancing-career-and-baby.yml","en-us/blog/balancing-career-and-baby",{"_path":1139,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1140,"content":1146,"config":1153,"_id":1155,"_type":14,"title":1156,"_source":16,"_file":1157,"_stem":1158,"_extension":19},"/en-us/blog/benefits-of-corporate-shadow-programs",{"title":1141,"description":1142,"ogTitle":1141,"ogDescription":1142,"noIndex":6,"ogImage":1143,"ogUrl":1144,"ogSiteName":667,"ogType":668,"canonicalUrls":1144,"schema":1145},"Shadow programs give employees a peek into leadership roles","Shadow programs are a great resource if you’re looking to explore new roles, expand your skill set, or learn how decisions are made.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683055/Blog/Hero%20Images/ideaabstract.jpg","https://about.gitlab.com/blog/benefits-of-corporate-shadow-programs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Shadow programs give employees a peek into leadership roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2023-07-17\",\n      }",{"title":1141,"description":1142,"authors":1147,"heroImage":1143,"date":1149,"body":1150,"category":810,"tags":1151},[1148],"Fatima Sarah 
Khalid","2023-07-17","\nHave you ever wished you could see into your manager's mind or understand the nitty-gritty details that make your organization run smoothly? Well, that's exactly what corporate shadow programs can do for you. They enable you to tag along with senior colleagues during regular workdays. You can witness how they tackle tasks, make decisions, and interact with various stakeholders. It's like getting a peek behind the scenes of your organization.\n\nAt GitLab, we host several shadow programs, including one that lets you [learn from GitLab CEO and co-founder Sid Sijbrandij](https://about.gitlab.com/handbook/ceo/shadow/). Each program aims to give team members a deeper understanding of different parts of our company's operations and processes. The experience empowers individuals to connect their work to the company's broader goals and gain valuable skills for their professional growth. Shadow programs benefit mentees, mentors, and organizations.\n\nThe following are some benefits of a shadow program:\n* [Insight into decision-making processes](#insight-into-decision-making-processes)\n* [Collaboration with leadership teams](#collaboration-with-leadership-teams) \n* [Personalized learning opportunities](#personalized-learning-opportunities) \n* [Cross-functional interactions](#cross-functional-interactions) \n\n## Insight into decision-making processes\nShadow programs can act as your backstage pass to the operational aspects of your organization. As you gain more insight into how your organization works and how decisions are made, you may start to appreciate the complexity that goes into keeping things running. Knowing how all the pieces work together will improve how you collaborate. 
At GitLab, shadows are able to observe [GitLab values](https://handbook.gitlab.com/handbook/values/) in action: Collaboration, Results, Efficiency, Diversity, Inclusion & Belonging, and Transparency.\n\n> From the perspective of operating the company on a daily basis, I was witness to how decisions were made at the leadership level of the organization. There were e-group meetings in which different topics were discussed, from mergers and acquisitions and sales compensation plans to hiring and team member morale. I was impressed by the camaraderie, collaboration, and great rapport among members of the e-group. Decisions were taken only after a thorough discussion had taken place and everyone was encouraged to participate.\n- *[Cesar Saavedra](https://gitlab.com/csaavedra1), [Being a GitLab CEO Shadow](https://www.linkedin.com/pulse/being-gitlab-ceo-shadow-cesar-saavedra/)*\n\n## Collaboration with leadership teams\nMembers of your executive team can sometimes feel like distant figures in your organization. Shadow programs put employees together with executives and other leaders, making them feel more approachable. This connection not only strengthens bonds across various departments and cultivates a positive work environment, but it can also inspire the shadowing employees to feel confident enough to pursue leadership positions themselves.\n\n> The CEO shadow program is such a great way to give team members insight into how the company works, while also making the company feel more inclusive, and its top-level team members feel more approachable. While I’ve always found Sid to be friendly and down to earth, I know that some people are afraid of approaching their manager with something, let alone someone at or near the top. 
I somewhat jokingly said to someone that it’s a good reminder that our executives are 'real' people.\n- *[Cynthia Ng](https://gitlab.com/cynthia), [Reflection on my CEO shadow rotation at GitLab](https://cynthiang.ca/2022/01/07/reflection-on-my-ceo-shadow-rotation-at-gitlab/)*\n\n## Personalized learning opportunities \nShadow programs at GitLab offer personalized learning opportunities. You can learn on the job from experienced team members, oftentimes from those with roles above yours. The experience is incredibly valuable, as seen from many of the reflections that GitLab team members have written about their shadow program experiences. The program fosters open communication, creating a pathway for better knowledge sharing across teams.\n\nShadowing also provides an opportunity for employees to be exposed to new situations and learn new skills.\n\n> In this meeting, a variety of different vice presidents and engineering managers discussed error budgets, reliability, and security. The [service-level agreement] requirements, security issues, and corrective actions were discussed. We went over what issues are currently affecting our error budgets and must be remediated. Root causes were analyzed and then a plan was made for remediation. This has shown me the efficiency of having all the information on a single document and then discussing proposals to correct. This makes the meeting flow much easier than not having any data beforehand. \n- *[Fernando Diaz](https://gitlab.com/fjdiaz), [What I learned as a Development Director Shadow at GitLab](https://awkwardferny.medium.com/what-i-learned-as-an-engineering-director-shadow-at-gitlab-1a783cb564d0)*\n\n## Cross-functional interactions \nWith a shadow program in another department or role, you will get the opportunity to experience work outside of your immediate team circle. The exposure can help broaden your understanding of the overall organization and how other teams work. 
You can develop new skills that prepare you for future opportunities. The relationships you build as a shadow across different groups will also stay with you after the program.\n\n> As an open-source contributor, I had some understanding of how GitLab worked. But during the shadowing week, I got to see the inner workings of the company, how teams collaborate, and how the company operates at scale.\n- *[Siddharth Asthana](https://gitlab.com/edith007), [My experience as a GitLab Hero in Developer Director Shadow Program](https://www.linkedin.com/pulse/my-experience-gitlab-hero-developer-director-shadow-program-asthana/)*\n\n## How to start a shadow program\nHere are some tips to help you start a shadow program at your organization:\n* Set clear expectations and guidelines for the program\n* Arrange flexible schedules that suit both the mentee and the mentor\n* Encourage open discussion and feedback\n* Ensure the program is tailored to meet the needs of participants\n* Implement confidentiality guidelines to protect sensitive information\n* Define the tasks for the participants, such as note-taking or updating the handbook\n* Determine the time commitment of rotations\n* Create opportunities for shadows to contribute such as helping to complete tasks\n\nCheck out our shadow programs for examples of how to structure them:\n* [CEO Shadow Program](https://about.gitlab.com/handbook/ceo/shadow/)\n* [Support Shadow Program](https://about.gitlab.com/handbook/support/#support-shadow-program) \n* [Director of Development Shadow Program](https://about.gitlab.com/handbook/engineering/development/shadow/director-shadow-program.html)\n* [CFO Shadow Program](https://about.gitlab.com/handbook/finance/growth-and-development/cfo-shadow-program/)\n\nMore resources:\n* [15 tips to succeed at GitLab's CEO Shadow program](https://about.gitlab.com/blog/get-the-most-out-of-a-ceo-shadow-program/)\n* [CEO Shadow program impressions and 
takeaways](https://about.gitlab.com/blog/ceo-shadow-impressions-takeaways/)\n* [The engineering director shadow experience at GitLab](https://about.gitlab.com/blog/engineering-director-shadow/)\n\n\n\n\n",[810,1152,9],"growth",{"slug":1154,"featured":6,"template":680},"benefits-of-corporate-shadow-programs","content:en-us:blog:benefits-of-corporate-shadow-programs.yml","Benefits Of Corporate Shadow Programs","en-us/blog/benefits-of-corporate-shadow-programs.yml","en-us/blog/benefits-of-corporate-shadow-programs",{"_path":1160,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1161,"content":1167,"config":1173,"_id":1175,"_type":14,"title":1176,"_source":16,"_file":1177,"_stem":1178,"_extension":19},"/en-us/blog/benefits-of-transparency-in-compliance",{"title":1162,"description":1163,"ogTitle":1162,"ogDescription":1163,"noIndex":6,"ogImage":1164,"ogUrl":1165,"ogSiteName":667,"ogType":668,"canonicalUrls":1165,"schema":1166},"The benefits of transparency in a compliance audit","We’re transparent by default, and just completed our first SOC 2 Type 1 audit! How does our public-first stance affect our compliance efforts and impact an audit?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681258/Blog/Hero%20Images/mvdheuvel-unsplash.jpg","https://about.gitlab.com/blog/benefits-of-transparency-in-compliance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The benefits of transparency in a compliance audit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steve Truong\"}],\n        \"datePublished\": \"2020-04-28\",\n      }",{"title":1162,"description":1163,"authors":1168,"heroImage":1164,"date":1170,"body":1171,"category":720,"tags":1172},[1169],"Steve Truong","2020-04-28","\n\n_Be open about as many things as possible. By making information public we can reduce the barriers to contribution and make collaboration easier. 
Use public issue trackers, projects, and repositories when possible._ – The GitLab [Handbook](/handbook/)\n\n[Transparency](https://handbook.gitlab.com/handbook/values/#transparency) is one of our core values and a fundamental part of what makes GitLab unique. But openness and [System and Organization Controls 2 (SOC 2)](https://en.wikipedia.org/wiki/System_and_Organization_Controls) audits often don't go hand in hand.\n\n## A quick history of the traditional audit\n\nAt traditional organizations, seeing a high level of transparency with documented procedures and processes publicly available, is not typical. And when the time comes for these organizations to undergo an audit, the process is time-consuming and can be burdensome for many employees who have to balance audit requests on top of their day-to-day work responsibilities.\n\nFor these same employees, a lot of time is spent digging through process documentation, policy documents, and even workflow documents, which is only the starting point for an audit. Eventually, this trickles down to the auditors having to meet with these employees to understand how the processes, policies, and workflows are executed, which means more time is sacrificed bringing an auditor up to speed on internal processes and procedures.\n\n## What does our approach to compliance look like?\n\nAt GitLab, we value transparency so much so that everything we do as a company is [public by default](https://handbook.gitlab.com/handbook/values/#public-by-default). 
If the need arises for information to be kept private, we have a policy in place which requires GitLab team members to provide justification within our (public) handbook as to why specific information can not be made public.\n\nSpecific to compliance, [handbook pages](/handbook/security/security-assurance/security-compliance/sec-controls.html#list-of-controls-by-family) detail the implemented controls that exist in our environment, including the scope that each control covers, the groups and/or process owners for each control, and even how we’ve mapped these controls to various industry standard compliance frameworks (e.g. ISO, PCI, SOC 2, etc.). Our handbook is the central repository for how we run the company. The change history for any update made to the handbook is available publicly. This means that the wider GitLab community is able to hold us accountable for what we say we do, and how we do it. Our handbook is geared for **self-serve** consumption: readers gain visibility into and understanding of how processes are executed and any nuances that should be considered.\n\nThis same self-serve approach applies to compliance and undergoing an audit, where we aim to enable an auditor to self-serve as much information about our processes and procedures as possible. In this case, we save the time control and process owners would have spent walking an auditor through how these processes are executed internally.\n\nBeyond decreasing the audit workload of control and process owners, transparency earns more frequent feedback and contributions from people outside the company.\n\nThis enables us to collaborate with our valued customers and partners and deepens the industry and community understanding of our business, culture and [values](https://handbook.gitlab.com/handbook/values/#why-have-values). 
Furthermore, we experience the added benefit of recruiting people who share our values.\n\n## How did our transparent culture impact our first SOC 2 Type 1 audit?\n\nAlmost all information about GitLab and the work that we perform day-to-day is available publicly (with the exception of any data that is considered [not public by default](/handbook/communication/confidentiality-levels/#not-public)). This opens a door that allows external auditors to do more digging and potentially ask questions that we might not be prepared for.\n\nKnowing this helped us know where and when to push back on our auditor to understand how their questions related back to a specific audit requirement.\n\nWe’re happy to say our transparent culture had an overall positive impact on our very first SOC 2 audit. As someone who has operated in an external audit capacity, I would have loved to have been able to perform an external audit over a company that had as much publicly facing documentation as GitLab! **Being transparent about everything we do as a company provided a lot of value during our SOC 2 audit in multiple ways:**\n\n* **Time savings:** In a traditional audit, an auditor schedules meetings with various process owners to walk through how internal processes relevant for an audit (such as change management) are executed. Here at GitLab, these procedures are documented publicly, so our auditor was able to read about these processes and provide more specific follow-up questions which ultimately resulted in team members being able to spend more time performing their day-to-day responsibilities instead of spending time interacting with our auditor.\n* **Understanding documentation gaps:** Based on the deeper follow-up questions asked by our auditor, we were able to identify gaps in our documentation around processes being executed, but without sufficient documentation in the handbook. 
This gave us an opportunity to improve our documentation and potentially gain time savings in future audit cycles.\n* **Reduced scope creep:** Given the amount of information that is available in our handbook, this may seem counterintuitive. However, because the GitLab architecture is published in our handbook, it helped to clearly define the scope of our SOC 2 audit and helped take the guesswork out of which technology should be subject to audit procedures.\n\nWhile there were many valuable takeaways stemming from our transparency, we also quickly learned that the documentation in our handbook could lead an auditor to preliminary conclusions because processes weren’t fully documented. We didn’t see this as a negative impact, but more as an area for improvement and iteration to further refine our handbook documentation. So, again, the outcome of a deeper understanding of the documentation gaps that existed in our handbook is truly valuable and one we’re already working to address. Here are some merge requests that have been opened as a result of our first SOC 2 Type 1 audit:\n\n* [Revising our Data Classification Policies for more clarity](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/44763/diffs#c5b510796a694dd7bee07591c5baed6b97c18ce4)\n* [GitLab Control Framework (“GCF”) revisions to align controls to SOC 2 Criteria and clean-up control language for clarity](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/44987)\n\n## How do you find the middle ground between transparency and compliance?\n\nTo put it plainly, there is no middle ground.\n\nCompliance is a key function of all organizations. These efforts ensure that the organization and its personnel are adhering to various regulatory obligations by working cross-functionally to implement policies and procedures to meet these requirements. 
Transparency is actually a powerful compliance driver because it means the wider GitLab community can hold the company accountable for the processes that we put into place and how we carry them out.\n\nBeing transparent *inherently* helps minimize the scope of an audit because it allows us to be specific and detailed about how we operate as a company. We have moved away from a traditional audit model where process owners sit in a meeting with an auditor and spend hours talking. Instead, through our handbook,  the documentation exists in a state that allows someone to pick it up and understand how to execute specific processes and procedures so that there is no guesswork involved. Processes and procedures aren’t left open to interpretation because that would reduce the ability for GitLab team members to \"self-serve\" information. We believe these same benefits extend to deeper understanding and clearer scope definition on the part of an audit team.\n\n## What’s next?\n\nAs a result of our first audit cycle, we have identified areas where we can iterate on our documentation to make it clearer and more concise. We’ll be working to get our handbook up to snuff while continuing to ready ourselves for an SOC 2 Type 2 audit. We are excited to continue exercising our value of transparency to further improve audit efficiency from audit to audit!\n\nBetween revising our documentation to be more clear and concise and preparing for our SOC 2 Type 2 audit, we have also performed a Cloud Security Alliance (CSA) CAIQ v3.1 self-assessment. We chose to perform this self-assessment partly for transparency and partly to provide the broader GitLab community with additional assurance over the maturation of the Security Compliance Program at GitLab. The self-assessment is [available directly on the CSA website](https://cloudsecurityalliance.org/star/registry/gitlab/).\n\n* Interested in learning more about GitLab's Security Certifications and Attestations? 
Visit our [Security Certifications and Attestations handbook page](/handbook/security/security-assurance/security-compliance/certifications.html) to learn more.\n* **Do you require a copy of GitLab’s SOC 2 report?** See how [potential and existing customers can request this report](/handbook/security/security-assurance/security-compliance/certifications.html#requesting-a-copy-of-the-gitlab-soc2-type-2-report).\n\n\n\nCover image by [Maarten van den Heuvel](https://unsplash.com/@mvdheuvel?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,720],{"slug":1174,"featured":6,"template":680},"benefits-of-transparency-in-compliance","content:en-us:blog:benefits-of-transparency-in-compliance.yml","Benefits Of Transparency In Compliance","en-us/blog/benefits-of-transparency-in-compliance.yml","en-us/blog/benefits-of-transparency-in-compliance",{"_path":1180,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1181,"content":1187,"config":1193,"_id":1195,"_type":14,"title":1196,"_source":16,"_file":1197,"_stem":1198,"_extension":19},"/en-us/blog/best-life-best-work",{"title":1182,"description":1183,"ogTitle":1182,"ogDescription":1183,"noIndex":6,"ogImage":1184,"ogUrl":1185,"ogSiteName":667,"ogType":668,"canonicalUrls":1185,"schema":1186},"Ski first, work later - How to win the burnout battle","How I truly achieved work/life balance with an all-remote async working style.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682336/Blog/Hero%20Images/taylor-peak.jpg","https://about.gitlab.com/blog/best-life-best-work","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ski first, work later - How to win the burnout battle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-06-07\",\n      
}",{"title":1182,"description":1183,"authors":1188,"heroImage":1184,"date":1190,"body":1191,"category":299,"tags":1192},[1189],"Taylor McCaslin","2022-06-07","\n\nIt's 9:13 am and 20 degrees outside in Big Sky, Montana. I'm bundled up in my warm rainbow pride ski suit. Dangling 30 feet in the crisp air, perched on a ski lift, I'm on my way up to a double black diamond run 9,382 feet above sea level. There are few people out this early on a Wednesday morning. I ski off the top of the lift and enjoy a beautifully untracked run of champagne powder snow, fresh from last night's snowstorm. This is a normal start to the workday for me. And I have a bit of a secret to admit, this is exactly why I joined GitLab.\n\n![Taylor on a chair lift at Big Sky Resort](https://about.gitlab.com/images/blogimages/2022-06-04-best-life-best-work/chair-lift.jpg)\n\n## Something's gotta give\n\nRewind two years to January 2020, before I joined GitLab. Before I had materialized my daily skiing routine. Before I moved to Big Sky. Before the global Covid-19 pandemic. I had decided I needed to make a change in my life. I had spent the past decade of my life climbing the startup tech career ladder. Along the way I had sacrificed my health, happiness, and my mental and emotional well-being. I was burnt out. While I don't think I'd change anything going back, I knew the next decade wouldn't sustain that lack of work and life balance. I needed to get back to being the person my friends and family knew: a slim guy with a smile always on his face and a hopeful outlook for the future.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## A remote change\n\nGitLab had been on my radar for a number of years as many of my tech friends had become DevOps engineers, but I had not used it myself. 
What I did know was at the time they were one of the few [truly remote companies with no offices and a global team embracing an async work style](/company/culture/all-remote/tips/#how-it-works-at-gitlab). \n\nWhile I hadn't ever worked remotely before, I knew I liked the idea of not being stuck in a bland office of noisy and distracting open floor layout workspaces surrounded by silly ping pong tables and unlimited snacks. My previous employers thought these things made for a 'supportive environment and 'great work culture'. I couldn't disagree more. It was a scary thought to have less structure, but my previous decade had shown me those offices weren't conducive to my sanity, happiness, or productivity. So I decided, let's go all in.\n\nI knew I wanted to make a big change, so I tested GitLab when I was interviewing. I gauged reactions from my interview panel as I described my desire to move to a ski mountain and balance working and skiing. I was caught by surprise. Every person I interviewed with loved this idea and encouraged me that GitLab's remote and async working style would be supportive of this plan. Just about everyone had a story of how they themselves had adjusted their schedule to add flexibility to their lives. I was convinced. This was the future. \n\n## A global pandemic \n\nTwo months after joining GitLab in January 2020, the pandemic ruined my plans to relocate to a wintry wonderland. I delayed my move, diving into work like many of us did, mainly because there wasn't anything better to do. Fast-forward to December 2020 – it was clear Covid wasn't going away anytime soon. I had gained the Covid 15 lbs. from sheltering in place in my Austin, Texas, apartment for the past nine months. It was time for a change. I needed to prioritize my sanity and health. An outdoor sport like skiing seemed like a relatively low-risk activity. The move was back on. 
\n\nBy February 2021, I had relocated to Big Sky, 1,600 miles away from the state I had lived in for my entire life. With zero friends and only two bags to a town of 3,000 people. I had visited Big Sky a number of times on ski trips with friends in the years before, and each time the three-to-four day trip never seemed long enough. Now I would be able to call this place my home. \n\n![Welcome sign to Big Sky, Montana](https://about.gitlab.com/images/blogimages/2022-06-04-best-life-best-work/welcome-to-bigsky.jpg)\n\n## A new chapter\n\n2021 was the year of me. I was turning 30, exploring a new life living in a mountain town. It's hard to believe how fast your life can completely change. I went from being depressed and unhappy living a sedentary life in Texas to being out and about on a beautiful mountain nearly daily with a new sense of self. \n\nI've done things I never thought I would, or could, do. I took up hiking. I learned to enjoy the outdoors by visiting Yellowstone National Park, just 30 minutes from my house. I also explored 13 other national and state parks. I learned downhill mountain biking. I rode over 1,000 miles downhill on my mountain bike. I've explored mountains in five states and two countries, and I've skied and biked in the shadow of the Grand Tetons. I skied 186 days at 20 resorts across the last two ski seasons. I went on a combination ski and biking trip to the mountains of Salt Lake City and Moab, Utah, and to the mountains of Canada. Along the way I lost 25 lbs., getting me back to a healthy weight I felt good about. \n\n![Collage of Taylor's adventures while at Gitlab](https://about.gitlab.com/images/blogimages/2022-06-04-best-life-best-work/collage.jpg)\n\n## Happy people do their best work\n\nAnd you know what else is crazy? I've been doing my best work since all this. [GitLab went public](/blog/gitlab-inc-takes-the-devops-platform-public/) last October. 
I am now establishing and leading [a new machine learning team](/direction/modelops/) at a public company, all from rural Montana. This also presented me with an opportunity to even further embrace remote async life. \n\nWith my new ModelOps team at GitLab, I have a number of team members in APAC, so I decided this past winter to switch to working evenings, embracing some of my favorite GitLab values: [Measure results not hours](https://handbook.gitlab.com/handbook/values/#measure-results-not-hours) and [shifting working hours for a cause](https://handbook.gitlab.com/handbook/values/#shift-working-hours-for-a-cause). This change allows me to ski and mountain bike during the day and, as a night owl, leverage my most productive hours overlapping with more of my colleagues in APAC. \n\n![Taylor biking in Glacier National Park](https://about.gitlab.com/images/blogimages/2022-06-04-best-life-best-work/glacier.jpg)\n\nNow I can join my evening team meetings in person rather than relying on a recording and notes. I always enjoy when I meet with my team members as they always want to know: \"What mountain are you on today?\" It's a simple small talk question, but it's just another way we connect virtually and get to know each other better as people. And, of course, at GitLab we have a Slack channel for everything. I frequently post and share my adventures in #DevSkiOps and #mountainbiking and enjoy swapping photos, tips, and articles with my fellow GitLab skiers and bikers. But let's have the numbers speak for themselves. Here are my '21/'22 ski season metrics, and I can't wait for this summer's mountain biking adventures.  \n\n![Taylor's 2021/22 Ski metrics](https://about.gitlab.com/images/blogimages/2022-06-04-best-life-best-work/slopesapp.jpg)\n\nMy journey to the mountains and switching up my working schedule all showcase my favorite value at GitLab: [Don't wait](https://handbook.gitlab.com/handbook/values/#dont-wait). 
I think this value applies to our personal lives as much as it does to our professional ones. Life is short, and the pandemic has made that even more real as we've lost so many friends and family so early. Gone are the days of sacrificing your life for 9-5 dead-end jobs. We're realizing life has so much more to offer and employers are increasingly recognizing that happy employees do their best work. \n\nHad you asked me two years ago if I'd see myself living in a small mountain town skiing and biking nearly daily while working at a public company, living my best personal life, and doing the best work of my career, I would have thought you were crazy. But now it's my life. All thanks to the [remote and async lifestyle](/company/culture/all-remote/guide/#the-remote-manifesto) GitLab empowers. And the best part, [we're hiring](/jobs/).\n\n![GitLab Remote Work Promo with Taylor](https://about.gitlab.com/images/blogimages/2022-06-04-best-life-best-work/ski-promo.png)\n",[9,832],{"slug":1194,"featured":6,"template":680},"best-life-best-work","content:en-us:blog:best-life-best-work.yml","Best Life Best Work","en-us/blog/best-life-best-work.yml","en-us/blog/best-life-best-work",{"_path":1200,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1201,"content":1207,"config":1213,"_id":1215,"_type":14,"title":1216,"_source":16,"_file":1217,"_stem":1218,"_extension":19},"/en-us/blog/boring-solutions-faster-iteration",{"title":1202,"description":1203,"ogTitle":1202,"ogDescription":1203,"noIndex":6,"ogImage":1204,"ogUrl":1205,"ogSiteName":667,"ogType":668,"canonicalUrls":1205,"schema":1206},"Want to iterate faster? Choose boring solutions","We’ve released 106 times in 106 months, proof that boring solutions do work when it comes to software development. 
Here are some of our favorites.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681499/Blog/Hero%20Images/pencils2.jpg","https://about.gitlab.com/blog/boring-solutions-faster-iteration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want to iterate faster? Choose boring solutions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-08-18\",\n      }",{"title":1202,"description":1203,"authors":1208,"heroImage":1204,"date":1209,"body":1210,"category":787,"tags":1211},[869],"2020-08-18","\n\n*bor-ing* | *bȯr-iŋ*\n\n_Definition of boring: causing weariness and restlessness through lack of interest : causing boredom : tiresome_ –– Merriam-Webster\n\nAt GitLab we’re boring and we’re proud of it. \"Use the simplest and most boring solutions for a problem, and remember that 'boring' should not be conflated with 'bad' or technical debt,\" our [handbook](/handbook/) says. \"The speed of innovation for our organization and product is constrained by the total complexity we have added so far, so every little reduction in complexity helps. Don’t pick an interesting technology just to make your work more fun; using established, popular tech will ensure a more stable and more familiar experience for you and other contributors.\"\n\nAlthough this may seem like counterintuitive behavior at a fast moving software startup, boring solutions are actually grounded in both science and history. [Boyd’s Law of Iteration](https://blog.codinghorror.com/boyds-law-of-iteration/) proves that faster iteration is superior to the quality of iteration. We feel like our history of releasing 106 times in 106 months also proves this point. 
We’ve managed [to iterate so quickly month after month](/blog/observations-on-how-to-iterate-faster/) *because* we’ve chosen the boring solution.\n\nIf this isn’t enough to convince your team to choose the boring solution more often, we’ve rounded up a slew of boring choices we’ve made to help you make the case (and maybe speed up your software delivery).\n\n## Issue labels\n\nAn early boring solution was the choice to use issue labels to power lists on issue boards. Instead of creating a new system, the boring solution was to use what we had to make a small iteration and get entirely new functionality. (Candidly - this design decision has become a huge pain point, but it’s still a great example of a boring solution) –– [William Chia](/company/team/#williamchia), senior product marketing manager, cloud native & [GitOps](/solutions/gitops/)\n\n## Skip the new UI\nWe created documentation around using [curl](https://curl.haxx.se) against API endpoints instead of creating a new user interface. –– [Nicholas Klick](/company/team/#nicholasklick), backend engineering manager, Configure\n\nWe chose to use a JSON Web Token to authenticate with Vault vs. building out a new UI or CLI. –– [Jackie Meshell](/company/team/#jmeshell), senior product manager, Release:Release Management\n\n## Embrace a small change\nWe recently added awareness [if a security scanner isn’t enabled](https://gitlab.com/gitlab-org/gitlab/-/issues/214392) from the project-level security dashboard. Previously there was no way to know this without going to the Configuration page. While it’s a small change, we’ve received good feedback so far, and hopefully encourages customers to take more advantage of our Gold/ Ultimate offering (and keep their applications safer!) –– [Becka Lippert](/company/team/#beckalippert), product designer, Secure\n\n## Boring = less confusing\nWe spent some time and research deciding among multi-select dropdowns, single-select dropdowns, and plain dropdowns. 
It was a simple but effective process and prompted team member [Austin Regnery](/company/team/#aregnery), product designer, Manage:Compliance, to comment, “Before joining GitLab I remember reading [this issue](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/issues/443) and being really impressed by both the boring solution and data-driven decision making.\n\n## Boring can also make work easier\nWe made it easier to read the title of an issue without having to scroll back to the top of the page. We initially proposed only the title would stick, but then we did a quick solution validation and found out the MVC was to include the issue status. We paired the first iteration back from a solution that would include other objects (like MRs, epics, etc.) and chose to scope it down just to issues. We also pushed back on making other elements sticky (like the tab nav) [in the first iteration](https://gitlab.com/gitlab-org/gitlab/-/issues/216880). –– [Mike Long](/company/team/#mikelong), product design manager, Plan & Manage\n\n## If boring doesn’t work, abandon it\nIn GitLab’s early days, we used [Gitolite](https://gitolite.com/gitolite/index.html) and the SSH key list. They were boring solutions. They were not elegant but allowed us to focus on adding value. When it no longer worked, we [changed it](/blog/gitlab-without-gitolite/). –– [Sid Sijbrandij](/company/team/#sytses), CEO\n\n## Who needs fancy?\n\nAnd if there’s any doubt that we won’t reach for something shiny when something simple will do, we’ll leave you with these two anecdotes.\n\nWe use SQL for the CI job queue. –– [Stan Hu](/company/team/#stanhu), engineering fellow\n\nAnd, when we made the decision to move from Azure to GCP, we used the most boring solution ever – a checklist (no, really, a checklist) to help us make the process seamless. 
We made 140 changes to that checklist, all told, but after that careful process, [we were able to migrate from Azure to GCP with no serious issues](/blog/gitlab-journey-from-azure-to-gcp/).\n\n*Read more about faster software delivery:*\n\nPro tips for a faster [CI/CD pipeline](/blog/effective-ci-cd-pipelines/)\n\nKeep your [Kubernetes runners moving](/blog/best-practices-for-kubernetes-runners/)\n\nGet [faster and more flexible pipelines](/blog/directed-acyclic-graph/)\n\nCover image by [Frank Vessia](https://unsplash.com/@frankvex?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,1212,677],"releases",{"slug":1214,"featured":6,"template":680},"boring-solutions-faster-iteration","content:en-us:blog:boring-solutions-faster-iteration.yml","Boring Solutions Faster Iteration","en-us/blog/boring-solutions-faster-iteration.yml","en-us/blog/boring-solutions-faster-iteration",{"_path":1220,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1221,"content":1227,"config":1232,"_id":1234,"_type":14,"title":1235,"_source":16,"_file":1236,"_stem":1237,"_extension":19},"/en-us/blog/breaking-into-security",{"title":1222,"description":1223,"ogTitle":1222,"ogDescription":1223,"noIndex":6,"ogImage":1224,"ogUrl":1225,"ogSiteName":667,"ogType":668,"canonicalUrls":1225,"schema":1226},"How to break into security","Oftentimes, the professional road to security practitioner is a windy one. 
We talk to 9 women from our Security team to see what their journey looked like.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670847/Blog/Hero%20Images/wocintechchat_blog1.jpg","https://about.gitlab.com/blog/breaking-into-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to break into security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2020-11-12\",\n      }",{"title":1222,"description":1223,"authors":1228,"heroImage":1224,"date":1229,"body":1230,"category":698,"tags":1231},[1010],"2020-11-12","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nThis is post 1 of a 3 part series profiling several women in GitLab’s security organization.  See part two, [\"What’s it like to work in security at GitLab?\"](/blog/whats-it-like-to-work-security-at-gitlab/) and three, [\"Considering a career in security? Here’s some advice.\"](/blog/considering-a-career-in-security/).\n{: .note}\n\nBreaking into technology, and security, can be difficult for anyone. At GitLab [31% of our workforce identifies as women](/handbook/people-group/people-success-performance-indicators/#diversity---women-at-gitlab). In our security department we have nine team members who are women out of a total of 48 team members; that’s 19%.  Global women in tech numbers are around 21.4% according to [CNET](https://www.cnet.com/news/microsofts-first-in-depth-diversity-report-shows-progress-remains-slow/) and this recent study, [“Resetting Tech Culture”](https://www.accenture.com/us-en/blogs/accenture-research/why-tech-is-losing-women-just-when-we-need-them-the-most) indicates that young women who go into tech drop out by the age of 35.  How do we change this?  
GitLab is looking to help encourage and support women in tech through our [outbound hiring model](/handbook/hiring/candidate/faq/), [tracking and working toward key metrics](/handbook/people-group/people-success-performance-indicators/#diversity---women-in-management), [inclusion training](/company/culture/inclusion/#diversity-inclusion--belonging-training-and-learning-opportunities), [team member resource groups](/company/culture/inclusion/erg-guide/#how-to-join-current-tmrgs-and-their-slack-channels), building and fostering an [inclusive remote culture](/company/culture/inclusion/building-diversity-and-inclusion/) and [mentorship programs](/company/culture/inclusion/erg-minorities-in-tech/mentoring/).\n\nWhen you’re planning your career and thinking about your professional or academic next steps, it helps to be able to understand the different paths that might take you where you want to go.  This is part 1 of a 3 part series where the 9 women in our Security department will share their backgrounds and experiences, a glimpse into their roles and responsibilities and offer up some tips and advice for those looking to work in tech, and quite possibly, the security industry.\n\n#### We asked: how did you get into security, what helped you *most* in getting to where you are, and how do you stay on top of your game?\n---\n\n### [Julia Lake](/company/team/#julia.lake) - Director, [Security Risk and Compliance](/handbook/security/#assure-the-customer---the-security-assurance-sub-department)\nJoined GitLab April 2020 / Connect with Julia on [LinkedIn](https://www.linkedin.com/in/julia-lake-16843740/)\n\n![Julia Lake](https://about.gitlab.com/images/blogimages/breaking-into-security/jlake_blog1.png){: .shadow.small.left.wrap-text}\n\n**What brought you to work in security?**\nI started my career in retail banking and, after being on the auditee side for a few years, developed an interest in becoming an auditor. 
So, I returned to school to pursue a degree in management information systems to ensure I had the necessary technical foundations and I began working in IT advisory for one of the big four firms directly after graduating. I’ve been working in security ever since, with additional focus in the privacy and quality domains.\n\n**What helped you *most* in getting to where you are?**\nHaving a professional mentor that proactively encouraged me to take on new challenges has been instrumental to my professional development.\n\n**How do you support your continual growth?**\nI maintain professional relationships with past industry colleagues, meet often with my mentor, proactively solicit feedback from my own leadership and staff, and subscribe to blogs, newsletters, webinars and training covering the field of audit and security from organizations like [MeriTalk](https://www.meritalk.com/news/emerging-tech/cyber-security/) and [ISACA](https://www.isaca.org/resources).\n\n\n### [Jennifer Blanco](/company/team/#jblanco2) - Sr. [Risk and Field Security](/handbook/security/security-assurance/field-security/) Analyst\nJoined GitLab June 2019 / Connect with Jennifer on [LinkedIn](https://www.linkedin.com/in/jenniferblanco1/)\n\n![Jennifer Blanco](https://about.gitlab.com/images/blogimages/breaking-into-security/jblanco_blog1.png){: .shadow.small.right.wrap-text}\n\n**What brought you to work in security?**\nI started my career as a paralegal in civil law for a firm that provided multiple areas of law. In this role I was tasked with building out the software workflow for my department; my first introduction to a technology business solution. A couple years later I moved to Seattle, Washington as I understood it to be an upcoming tech hub and applied for a role in which I could apply my experience. That company was DocuSign, and I’m proud to say I was the first Security Compliance employee ever hired there, back in 2012. 
I spent four years building out the customer assurance function, external audit programs and third-party risk for engineering dependencies.\n\n**What helped you *most* in getting to where you are?**\nThe customer assessments and audit work at DocuSign are hands-down the biggest propeller for my knowledge journey in Security. I was fortunate to have gotten exposure to, not only security practices, but also the deeply technical aspects of a company that managed their own bare metal infrastructure and networking within a datacenter--by the way, datacenters are SUPER cool if you ever have the opportunity to step foot inside.\n\n**How do you support your continual growth?**\nI have a bachelor’s in communications with an emphasis on research which helped sharpen my critical thinking skills. To strengthen my technical background and support my future goals, I'm currently working on Informatics core classes specializing in assurance and cybersecurity as prerequisites for either a master’s in data science or law degree; to be decided. Professionally, I've curated my path by joining companies where I could expand my knowledge within technology. I’ve also completed a number of bootcamps and training and generally keep up with innovation and industry news.\n\n\n### [Juliet Wanjohi](/company/team/#jwanjohi) - Security Engineer, [Security Automation](/handbook/security/security-engineering/automation/)\nJoined Joined GitLab May 2020 / Connect with Juliet on [LinkedIn](https://www.linkedin.com/in/juliet-wanjohi/) and [Twitter](https://twitter.com/jay_wanjohi)\n\n![Juliet Wanjohi](https://about.gitlab.com/images/blogimages/breaking-into-security/jwanjohl_blog1.png){: .shadow.small.left.wrap-text}\n\n**What brought you to work in security?**\nFrom a very young age, I’ve been interested in computers. When I joined high school, I decided to take computer studies and my teacher for this subject actually became my first mentor and role model as a woman in tech. 
I later joined the University of Nairobi to study my Bachelor’s in computer science, and as you can imagine, the ratio of women in comparison to men was highly imbalanced. However, being part of the minority did not discourage me and I decided to pursue a master of science degree in cybersecurity in the UK as I had an interest in learning how to protect software applications and build security tools. During my time there, I had the wonderful opportunity to be an [intern](https://about.gitlab.com/blog/what-its-like-to-intern-in-gitlab-security/) within the Security department at GitLab, and progress on to become a full-time security engineer with the team.\n\n**What helped you *most* in getting to where you are?**\nThough my journey into security may not be as long, what has helped me the most is having a network of people around me that support me and encourage me to do better. Cybersecurity is a very broad area and as I become familiar with the different domains and what interests me the most, it is important to have people to reach out to and ask questions no matter how simple the questions may sound to you. In addition, I often find myself to be the only woman/person of color/youngest person in the room. This, bundled with my shy personality, makes it a daunting task to ask questions sometimes. However, I tell myself to be confident in my knowledge and believe in myself. Having this confidence and getting answers to these questions is what will help me to evolve and grow professionally!\n\n**How do you support your continual growth?**\nKeeping in touch with mentors who help me map out my career path and offer feedback is definitely an important support factor for me. 
Additionally, I enjoy reading blogs by [Troy Hunt](https://www.troyhunt.com/) and [Bruce Schneier](https://www.schneier.com/), listening to podcasts such as [Smashing Security](https://www.smashingsecurity.com/), and attending conferences like [BlackHat](https://www.blackhat.com/) that offer an opportunity to network with diverse groups of people and learn about their experiences in security.\n\n\n### [Kristie Thomas](/company/team/#kristie.thomas) - [Executive Business Administrator](/handbook/eba/)\nJoined GitLab February 2018 / Connect with Kristie on [LinkedIn](https://www.linkedin.com/in/kristiemcgoldrick/)\n\n![Kristie Thomas](https://about.gitlab.com/images/blogimages/breaking-into-security/kthomas_blog1.png){: .shadow.small.right.wrap-text}\n\n**What brought you to work in security?**\nI got started in tech a few years after I graduated from college with a bachelor’s in communications. I knew nothing about technology, business or what my 10 year plan was. I loved experiencing a start-up and felt at home in the fast-paced environment. I grew in my role and saw multiple paths I could take in the industry. In the last 4 years prior to becoming an executive business administrator (EBA), my jobs were more technical. I spent a lot of time writing SQL queries and troubleshooting CI pipelines. I felt pressure to be technical, even though I didn’t enjoy it. I made a list of what I liked and didn’t like, and realized the perfect fit for me would be to move into an EBA role and support the engineering teams at GitLab, allowing me to broaden my skillset while still being involved in technical work, at times. I can truly say that my role as an EBA at GitLab is perfect for me and has exceeded my expectations in many ways.\n\n**What helped you *most* in getting to where you are?**\nIt has always been important for me to be in tune with myself and know what fulfills and exhausts me. 
Before transitioning to my current role, I took on the exercise of listing and organizing the things I enjoyed and disliked about previous roles and was able to clearly identify what the right position for me would look like. Thankfully, the path I had been on led me to my current role. I’ve always been a curious person and I enjoy engaging with others. I feel fulfilled when others succeed and I am driven to help people meet their goals. I get to do that every day at GitLab, and this role has allowed me to interact with hundreds of team members and form professional relationships. Because I attend a variety of meetings and help with a handful of projects, I get to learn new aspects of the business on a daily basis and am energized by my work.\n\n**How do you support your continual growth?**\nI believe that growth comes from taking care of yourself. I have to prioritize non-work things to succeed at my job. A GitLab colleague recommended the book [Designing Your Life](https://designingyour.life/the-book/) and I have used its principles to find balance, meaning and joy. Along with a lot of self-care, I prioritize a few monthly 1:1s with mentors outside of GitLab.  Talking to others at various stages in their career and getting advice on current challenges has helped me grow, try new things, and solve problems in unique ways. I also feel comfortable making mistakes because this gives me the opportunity to try again with wisdom learned along the way.\n\n\n### [Liz Coleman](/company/team/#lcoleman) - Sr. Security Analyst, [Compliance](/handbook/security/security-assurance/security-compliance/ )\nJoined GitLab January 2020 / Connect with Liz on [LinkedIn](https://www.linkedin.com/in/elizabeth-coleman-5779418b/)\n\n![Liz Coleman](https://about.gitlab.com/images/blogimages/breaking-into-security/lcoleman_blog1.png){: .shadow.small.left.wrap-text}\n\n**What brought you to work in security?**\nThe twists and turns of life are what really guided me to security. 
It was not something I initially sought out from an educational or professional perspective. In fact, I originally intended to go into politics which took a turn into government compliance and ultimately, information technology and security as it relates to compliance. However I’ve realized over the course of my career that although they are different buckets of work, they are all interconnected in so many ways.\n\n**What helped you *most* in getting to where you are?**\nOne big thing that has helped me to get to this point in my career is being able to identify synergies between my past experiences and new opportunities, and finding those organizations who are able to see how my expertise can span beyond silos. I’ve found that I may have more experience from a compliance perspective but it can apply to auditing. Or understanding IT processes can assist with security initiatives. I don’t think you can go wrong with working hard, being open to learning and trying to surround yourself with quality people that can see your value even if your resume doesn’t 100% match the job description.\n\n**How do you support your continual growth?**\nI obtain, at a minimum, 40 continuing professional education (CPE) credits a year. Typically these are obtained through webinars, e-learning, conferences (pre-Covid), and signing up for anything that I think might be interesting. I also attend [GitLab Commit](/events/commit/)! 
It’s one of the best opportunities to further immerse myself in all things GitLab and learn about areas of the business that I don’t tend to focus on.\n\n\n### [Meghan Maneval](/company/team/#mmaneval20) - Manager, [Risk and Field Security](/handbook/security/security-assurance/field-security/)\nJoined GitLab July 2020 / Connect with Meghan on [LinkedIn](https://www.linkedin.com/in/meghanmaneval/)\n\n![Meghan Maneval](https://about.gitlab.com/images/blogimages/breaking-into-security/mmaneval_blog1.png){: .shadow.small.right.wrap-text}\n\n**What brought you to work in security?**\nAfter I graduated with a bachelor’s degree in management of technology, I pretty much applied for as many jobs as I could that related to technology. I ended up being offered a job as an IT auditor for an insurance company. After working there for some time, I obtained my master’s in business administration and got the opportunity to lead a dynamic team of auditors and Security Analysts. It was at that point that I realized I wanted to know more about security and pivoted into security compliance.\n\n**What helped you *most* in getting to where you are?**\nHaving a strong mentor that I can speak candidly with. A lot of people think a mentor is the same as your boss, but it’s not. Having an independent person that you can be open and honest with is key. They can guide you through tough situations and provide opportunities to grow.\n\n**How do you support your continual growth?**\nI make it a point to participate in industry events and webinars where I can network and learn from others in my field. In particular, I enjoy attending [ISACA Webinars](https://www.isaca.org/) as they directly relate to my role in governance, risk and compliance. I also really enjoy more vendor-specific user conferences like [Cisco LIVE](https://www.ciscolive.com/) because they generally have tracks specific to security or risk management and it gives great insight into how others use the same tools I do. 
I also love to read and enjoy reading retrospectives of security incidents and lessons learned from past security events.\n\n\n### [Mitra Jozenazemian](/company/team/#mjozenazemian) - Senior Security Engineer, [Security Incident Response Team](https://handbook.gitlab.com/job-families/security/security-engineer/#sirt---security-incident-response-team)\nJoined GitLab July 2020 / Connect with Mitra on [LinkedIn](https://www.linkedin.com/in/mitra-jozenazemian-0a05233b)\n\n![Mitra Jozenazemian](https://about.gitlab.com/images/blogimages/breaking-into-security/mjozenazemian_blog1.png){: .shadow.small.left.wrap-text}\n\n**What brought you to work in security?**\nI got my bachelor’s degree in information technology. During my studies, I had a security course where I learned about hacking and how to secure systems against hackers. The course made me feel like I was a detective.  As a result of that course, I developed a passion for security so I pursued a master’s in information security and started working as a security engineer in 2010.\n\n**What helped you *most* in getting to where you are?**\nBe open to new experiences. In 2013 a mentor at my university asked me to join his team as a computer forensics researcher and trainer. Before that I’d never done forensic analysis. To prepare, I started to learn how to collect and examine volatile data on a live system while responding to an incident so that I could later teach it. I found it so interesting that I stayed awake for nights and studied and analysed memory/disk images. After that experience, I knew I would love to work on a security team, responding to incidents and trying to find clues of what has happened among the collected evidence.\n\n**How do you support your continual growth?**\nI am always excited for new challenges and the opportunity to participate in something outside of my comfort zone.  
I also try to stay up-to-date through IT related newsletters, webinars and training such as [SANS courses](https://www.sans.org/cyber-security-courses/).\n\n\n### [Rupal Shah](/company/team/#rcshah) - Analyst, [Security Compliance](/handbook/security/#security-compliance)\nJoined GitLab October 2020 / Connect with Rupal on [LinkedIn](https://www.linkedin.com/in/rupal-shah-57a384/)\n\n![Rupal Shah](https://about.gitlab.com/images/blogimages/breaking-into-security/rshah_blog1.png){: .shadow.small.right.wrap-text}\n\n**What brought you to work in security?**\nI definitely did not enter the world of Security in a traditional way. I graduated with a MIS (management information systems) degree, but never really pursued it.  I started my career doing customer integrations from an in-house product to a SaaS application and then moved to customer support which led into IT project management.  I was approached with the opportunity to build out and lead a compliance program for SOX (based off of my project management and organizational skills) and that’s how I started my career in compliance and security; literally learning from the ground up!\n\n**What helped you *most* in getting to where you are?**\nBeing a team of only 1.5 for all things related to SaaS-based IT compliance allowed me the opportunity to learn everything about governance, risk and compliance and really get my feet wet.  Building out an entire program to manage SOX, SOC2, risk assessments, third-party vendor security management, etc programs from scratch allowed me to focus on my growth potential and career progression.\n\n**How do you support your continual growth?**\nI try to give myself as much exposure as I can by subscribing to many blogs/newsletters/webinars and attending trainings/conferences when I have the time.  The [Women in Cybersecurity](https://www.wicys.org/) is a great event. 
I’d also recommend reviewing this [virtual cybersecurity event list from Digital Guardian](https://digitalguardian.com/blog/top-50-must-attend-information-security-conferences). Included in my reading list are [revsec](https://www.revsec.com/blog), [threatstack](https://www.threatstack.com/blog), [csoonline](https://www.csoonline.com/) and [darkreading](https://www.darkreading.com/). I also try to learn about how other parts of the organization function and to identify areas that are lacking; where opportunities to improve security from the lens of the organization and not just a specific department or project may exist. Lastly, staying in touch with my mentor (a previous manager) has enabled my knowledge growth and provided constructive feedback--which makes me work harder and learn from my mistakes.\n\n\n### [Heather Simpson](/company/team/#heather) - Senior External Communications Analyst, [Security Engineering ](/handbook/security/security-engineering/)\nJoined GitLab February 2019 / Connect with Heather on [LinkedIn](https://www.linkedin.com/in/heathersimpson700/) and [Twitter](https://twitter.com/heatherswall)\n\n![Heather Simpson](https://about.gitlab.com/images/blogimages/breaking-into-security/hsimpson_blog1.png){: .shadow.small.left.wrap-text}\n\n**What brought you to work in security?**\nSpoiler alert!  I don’t have a “technical” background and I don’t have even a handful of years working in security.  What I do have is two bachelor’s degrees: international communications and Spanish, a master of science in marketing and close to 15 years experience working in tech.  I came to GitLab from a large IT integrator where I led marketing communications efforts for the office of the CTO. I found that I enjoy collaborating directly with deeply technical folks on marketing initiatives that shine a light on the awesomeness that is their expertise and work.  I’m one of few marketing or communications roles at GitLab that sit directly within the business. 
This helps me maintain a deeper understanding of the programs, processes and technology we use and the people that make them successful. And, I can always count on my security team members to help me break down the complex and patiently answer my many, many questions so I can gain clarity that I hope is reflected in our external communications.\n\n**What helped you *most* in getting to where you are?**\nI’ve always enjoyed a challenge, which has led me to tackle new roles and new subject matters and areas within the tech industry. It's also given me a love for building new marketing, communications and engagement programs and processes from the ground up.\n\n**How do you support your continual growth?**\nIs twitter an answer? 😆\nI ❤️ reading and try to read at least 60 books each year. I also try and consume as much as I can on platforms like Twitter, LinkedIn and HackerNews around marketing and/or devops and security topics. I appreciate the [Hootsuite blog](https://blog.hootsuite.com/) and Ann Handley's [Total Anarchy](https://archive.aweber.com/totalannarchy) newsletter for marketing topics and try and stay on top of the feeds from many of our bug bounty hunters through this [twitter list](https://twitter.com/i/lists/1296163368252956672). 
Staying abreast of trends helps me stay sharp even when I’m not regularly or directly practicing those skillsets in a current role.\n\nCover image by [#WOCinTech Chat](https://www.wocintechchat.com/).\n{: .note}\n\n",[720,9],{"slug":1233,"featured":6,"template":680},"breaking-into-security","content:en-us:blog:breaking-into-security.yml","Breaking Into Security","en-us/blog/breaking-into-security.yml","en-us/blog/breaking-into-security",{"_path":1239,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1240,"content":1245,"config":1251,"_id":1253,"_type":14,"title":1254,"_source":16,"_file":1255,"_stem":1256,"_extension":19},"/en-us/blog/building-a-handbook-first-remote-learning-culture",{"title":1241,"description":1242,"ogTitle":1241,"ogDescription":1242,"noIndex":6,"ogImage":690,"ogUrl":1243,"ogSiteName":667,"ogType":668,"canonicalUrls":1243,"schema":1244},"Building a Handbook First Remote Learning Culture","An overview on how to build a handbook first remote learning culture","https://about.gitlab.com/blog/building-a-handbook-first-remote-learning-culture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building a Handbook First Remote Learning Culture\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Josh Zimmerman\"}],\n        \"datePublished\": \"2020-12-22\",\n      }",{"title":1241,"description":1242,"authors":1246,"heroImage":690,"date":1248,"body":1249,"category":698,"tags":1250},[1247],"Josh Zimmerman","2020-12-22","\n{::options parse_block_html=\"true\" /}\n\nLearning & Development (L&D) is a vital function of any organization’s People or HR team. When most professionals think of L&D, they may remember sitting in the back of a conference room hearing a corporate trainer deliver slides, or maybe accessing self-paced training once or twice a year, or perhaps taking a survey on how to grow their skills. 
At GitLab, L&D is a huge priority and we do it differently than most organizations! \n\nSince GitLab is [all-remote](https://about.gitlab.com/company/culture/all-remote/) and our [Handbook](https://about.gitlab.com/handbook/) is our primary source of learning, you may be asking yourself, how does L&D create and reinforce a remote learning culture? \n\n[GitLab’s Handbook](https://about.gitlab.com/handbook/) is over 8,000 pages long, and it grows every day. We consider each page to be a source of learning & development material. Pages are for training new team members on GitLab processes, culture, ways of working, and much more. The Handbook is publicly available worldwide, and anyone can [learn about GitLab's Remote working culture](/company/culture/all-remote/building-culture/) and [DevOps](/topics/devops/). It’s a ton to digest, and from a learning perspective, the text-based format can lean heavily on reading and video. For GitLab to scale L&D, we need to make our Handbook more consumable where it is easy to learn new things!  \n\nI joined GitLab eight months ago from management consulting to help build a learning culture. It’s an exciting opportunity. Our team is growing fast. We deliver more resources to the community, and we are helping team members learn more by introducing new handbook first learning modalities. I wanted to share my thoughts on some of the biggest takeaways on building a handbook first remote learning culture. Consider these ingredients to scaling L&D: \n\n## Build a Learning Infrastructure \n\nGitLab’s Handbook is our primary source of training material. Every piece of content pulls from the handbook. As GitLab continues to grow, we needed to invest in a learning technology infrastructure that can enable personalized/self-service learning. 
By taking material in the handbook, we can apply a [level of interactivity](https://about.gitlab.com/handbook/people-group/learning-and-development/interactive-learning/) to allow various learning styles to consume bite-sized content. We recently invested in a [Learning Experience Platform (LXP)](https://about.gitlab.com/handbook/people-group/learning-and-development/#gitlab-learn-edcast-learning-experience-platform-lxp) by [EdCast](https://gitlab.edcast.com/log_in?auto_sso=true) that will significantly improve our ability to provide certifications, assessments, and self-service learning. \n\nWe also invested in a content library from LinkedIn Learning for off-the-shelf content. Team members will have access to the library for courses that can supplement GitLab’s customized learning content. There's also our use of Articulate 360, which we use to [build interactive handbook first courses](https://about.gitlab.com/handbook/people-group/learning-and-development/interactive-learning/) in the LXP. \n\nThe L&D team has pursued various certification programs that complement our values, such as [Tracom Corporations Social Styles](https://tracom.com/social-style-training/model) facilitator and [Crucial Conversations certification from VitalSmarts](https://www.vitalsmarts.com/crucial-conversations-training/). Our plan is to equip the L&D team with as many tools to design and deliver scalable training. By continuing to invest in learning technologies, we want our team members to know that growing your skills is a top priority for the future of GitLab. \n\n## Design Social Learning Experiences\n\nRemote work can have [some drawbacks](https://about.gitlab.com/company/culture/all-remote/drawbacks/). One of those challenges may be a lack of connection with your coworkers. GitLab L&D uses our live learning courses as an opportunity to build relationships and a sense of community with team members. 
There may not be a lot of forums outside of [coffee chats](https://about.gitlab.com/company/culture/all-remote/informal-communication/#coffee-chats), [AMAs](https://about.gitlab.com/handbook/communication/ask-me-anything/), [group conversations](https://about.gitlab.com/handbook/group-conversations/), or [1-1 meetings](https://about.gitlab.com/handbook/leadership/1-1/) where team members can **Learn From Others**. We have started to adapt our [live learnings](https://about.gitlab.com/handbook/people-group/learning-and-development/#live-learning) to serve as networking activities where team members work on scenarios in small groups, get to know one another, and share lessons learned. We’ve noticed increased engagement across learners and an atmosphere of encouraging collaboration. Social Learning is the cornerstone of how we will design learning experiences. We can’t expect participants to pay attention to slides for 25 to 50-minute sessions, so we decided to throw out most of them! Team members want to network and build connections during sessions. Why not use learning as a forum to do just that? \n\n## Prioritize Leadership Buy-In and Sponsorship. \n\nGitLab’s CEO, Sid, is very passionate about L&D. He wants to be part of our learning initiatives and share knowledge from his experience growing the organization. Sid has partnered with L&D on recording interviews and [advocating for up-leveling our handbook first learning content](https://about.gitlab.com/handbook/people-group/learning-and-development/#handbook-first-training-content). In order to scale, we receive executive support from Sid and the rest of the e-group on essential initiatives. Our leadership is behind us. 
Without their support for learning, it would be difficult for L&D to grow and show our people we are invested in them.\n\n## Change Management for Learning & Development\n\nAsking team members to [take time out to learn new skills](https://handbook.gitlab.com/handbook/organizational-change-management/) takes time and energy. Everyone at GitLab is incredibly busy, and carving out time to reskill, and upskill requires a proactive approach. We use GitLab communication vehicles such as Slack channels and Issues to spread various [learning initiatives](https://about.gitlab.com/handbook/people-group/learning-and-development/learning-initiatives/). With the introduction of new tools, technology, initiatives, and courses, L&D has to conduct [continuous change management](https://handbook.gitlab.com/handbook/organizational-change-management/#introduction) with a heavy focus on communications and enablement. Some of those methods include a [monthly continuous learning call](https://about.gitlab.com/handbook/people-group/learning-and-development/learning-initiatives/#monthly-continuous-learning-call-overview) and quarterly newsletter, where we highlight what’s happening in the L&D space. \n\n## Focus on Developing your Leaders\n\nOne of my first initiatives at GitLab was developing a [manager enablement program](https://about.gitlab.com/handbook/people-group/learning-and-development/manager-challenge/). The program’s focus is to reinforce behaviors through a set period of time, 3 weeks, to train our leaders on remote management practices. We applied neuroscience techniques so that participants can learn at their own pace through positive engagement and social learning. We also recognized that learners might have various attention span ranges, so why not create a program that allows participants to complete activities through [daily challenges](https://about.gitlab.com/handbook/people-group/learning-and-development/manager-challenge/#week-1) that take 20 minutes to complete. 
The program is bite-sized, blended for different learning styles, flexible, and engaging with the focus on equipping managers with critical skills.\n\nBy focusing on managers as a key priority for L&D, we were able to pilot a program and iterate on future deliveries rapidly. We now have a group of managers who are learning ambassadors that can advocate for learning initiatives in the future.\n\n## Reinforce GitLab Values \n\n[C.R.E.D.I.T.](https://handbook.gitlab.com/handbook/values/#credit) is the acronym GitLab uses for our six values. One way for us to reinforce our values is by threading them throughout our curriculum design and development. The values serve as the cornerstone to how GitLab operates as a Remote organization. I’m lucky to work for an organization that takes them so seriously, and it makes my job as an L&D professional easier. By rooting learning in our values, we can reinforce behaviors. \n\n## Prove L&D is a high-value organization\n\nL&D is a relatively new organization within GitLab. Our team considers ourselves strategic enablers. We are striving to develop a mindset that feels responsible for driving strategy and leading change. Think bigger and broader by being proactive in understanding GitLab’s goals, methods, and operations. We have a goal to align every aspect of L&D with the rest of the company. By piloting and [iterating new initiatives](https://about.gitlab.com/handbook/people-group/learning-and-development/learning-initiatives/), we let the organization know that we are here to enable behavioral change that directly increases results!\n\nWe have a colossal charter set out for us in L&D. But with the strong encouragement from our leadership, we know that building a handbook first remote learning culture is top of mind. Hopefully, some of the points outlined in this blog will equip you with a few tips on building a learning culture within your organization. 
\n\nTo learn more, check out our handbook page, [GitLab Learning and Development](https://about.gitlab.com/handbook/people-group/learning-and-development/), or contact learning@gitlab.com to speak with a member of our team.\n",[811,9,832,267],{"slug":1252,"featured":6,"template":680},"building-a-handbook-first-remote-learning-culture","content:en-us:blog:building-a-handbook-first-remote-learning-culture.yml","Building A Handbook First Remote Learning Culture","en-us/blog/building-a-handbook-first-remote-learning-culture.yml","en-us/blog/building-a-handbook-first-remote-learning-culture",{"_path":1258,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1259,"content":1265,"config":1271,"_id":1273,"_type":14,"title":1274,"_source":16,"_file":1275,"_stem":1276,"_extension":19},"/en-us/blog/building-an-award-winning-culture-at-gitlab",{"title":1260,"description":1261,"ogTitle":1260,"ogDescription":1261,"noIndex":6,"ogImage":1262,"ogUrl":1263,"ogSiteName":667,"ogType":668,"canonicalUrls":1263,"schema":1264},"How we're building an award-winning culture at GitLab","We're proud to see GitLab recognized as one of Inc. Magazine's Best Workplaces in 2019!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670139/Blog/Hero%20Images/gitlab-contribute-team-photo.png","https://about.gitlab.com/blog/building-an-award-winning-culture-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we're building an award-winning culture at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Betsy Church\"}],\n        \"datePublished\": \"2019-05-16\",\n      }",{"title":1260,"description":1261,"authors":1266,"heroImage":1262,"date":1268,"body":1269,"category":299,"tags":1270},[1267],"Betsy Church","2019-05-16","\n\nWe’re delighted to share that GitLab has been named one of [Inc. 
Magazine’s Best Workplaces in 2019](https://www.inc.com/best-workplaces)!\n\nIn its fourth annual ranking for the private company sector, Inc.’s Best Workplaces list recognizes companies that have created exceptional workplaces through vibrant cultures, employee engagement, and stellar benefits.\n\nAlong with nearly 2,000 other participating companies, GitLab submitted an initial application followed by an anonymous employee survey, which gathered information about our team members’ confidence in the future, management effectiveness, trust, perks, and more.\n\nThere are many reasons we’re proud of the culture we’ve built and continue to sustain at GitLab, but we think it’s best to hear about it straight from our people.\nHere’s what a few of our [team members](/company/team/) from across the globe value most about life at GitLab:\n\n\n> “GitLab has a world-class team and industry-changing product velocity. I'm constantly learning from the people around me, and I've yet to hear anyone reject an idea because ‘It's just too hard.’ As a UX practitioner, we're often used to seeing our efforts get pushed down a backlog, but at GitLab, we see product refinements continually (and quickly) delivered into production. It's exciting and motivating.” [_– Christie Lenneville, UX Director_](/company/team/#clenneville)\n\n\n> “Working for GitLab is about something bigger than myself – it's bigger than my team, it's bigger than the employees – it's about partnering with the entire community to create better software.\nSimultaneously I get to help blaze a new trail – scaling an amazing culture with remote teams from around the globe.”\n[_– Joel Krooswyk, Manager, Customer Success_](/company/team/#JoelKroos)\n\n\n> “There are so many things that make GitLab special.\nTo start, of course, it's the people. 
I think this is due to the unique way in which we work – totally remotely from all around the globe.\nThere is a better chance of obtaining the best talent for the role when there aren't restrictions placed on location.\nThe flexibility also allows me to have time back for my family and life.\nThe stress is lower, I am happier working, and the overall work-life balance is just better here.”\n[_– Candace Byrdsong Williams, Diversity, Inclusion and Belonging Partner_](/company/team/#cwilliams3)\n\n\n> “Working at GitLab gives me confidence because we work with the highest level of transparency.\nBeing able to work remotely not only saves me on average two hours of daily commute time, but also makes it so efficient to respond to customers on time at any time.” [_– Xiaogang Wen, Solutions Architect_](/company/team/#xiaogang_gitlab)\n\n\n> “I love working at GitLab for a variety of reasons, but the flexibility in creating work-life harmony in my life tops my list.\nI work closely with our executive team here, and they have been so supportive and encouraging when family-related conflicts arise.\nThey are constantly reminding me that “family first” is our mantra, and give me ease of mind to take time away when needed.\nOutside of that, Sid, our co-founder and CEO, told me if it’s a beautiful day out and I just want to go enjoy it, I should do that.\nMoments like these make me so proud to be a part of the GitLab team.” [_– Cheri Holmes, Manager, Executive Assistant_](/company/team/#cheriholmes)\n\n\nWe celebrate this news as many of our team members are returning home from [GitLab Contribute](/events/gitlab-contribute/), the next iteration of our company [summits](/company/culture/contribute/previous/).\nHere's a glimpse of the fun we had together in New Orleans:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xdtPNXtkBhE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nThank you to all of our 
team members around the globe who contribute to making GitLab a great place to work.\n\nInterested in joining our fast-growing, [all-remote](/company/culture/all-remote/) team? [Check out our vacancies](/jobs/).\n",[832,811,675,9],{"slug":1272,"featured":6,"template":680},"building-an-award-winning-culture-at-gitlab","content:en-us:blog:building-an-award-winning-culture-at-gitlab.yml","Building An Award Winning Culture At Gitlab","en-us/blog/building-an-award-winning-culture-at-gitlab.yml","en-us/blog/building-an-award-winning-culture-at-gitlab",{"_path":1278,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1279,"content":1285,"config":1300,"_id":1302,"_type":14,"title":1303,"_source":16,"_file":1304,"_stem":1305,"_extension":19},"/en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"title":1280,"description":1281,"ogTitle":1280,"ogDescription":1281,"noIndex":6,"ogImage":1282,"ogUrl":1283,"ogSiteName":667,"ogType":668,"canonicalUrls":1283,"schema":1284},"Building GitLab with GitLab: A multi-region service to deliver AI features","Discover how we built our first multi-region deployment for teams at GitLab using the platform's many features, helping create a frictionless developer experience for GitLab Duo users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098664/Blog/Hero%20Images/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type_building-gitlab-with-gitlab-no-type.png_1750098663794.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: A multi-region service to deliver AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chance Feick\"},{\"@type\":\"Person\",\"name\":\"Sam Wiskow\"}],\n        \"datePublished\": \"2024-09-12\",\n      
}",{"title":1280,"description":1281,"authors":1286,"heroImage":1282,"date":1289,"body":1290,"category":1291,"tags":1292},[1287,1288],"Chance Feick","Sam Wiskow","2024-09-12","For GitLab Duo, real-time AI-powered capabilities like [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) need low-latency response times for a frictionless developer experience. Users don’t want to interrupt their flow and wait for a code suggestion to show up. To ensure GitLab Duo can provide the right suggestion at the right time and meet high performance standards for critical AI infrastructure, GitLab recently launched our first multi-region service to deliver AI features.\n\nIn this article, we will cover the benefits of multi-region services, how we built an internal platform codenamed ‘Runway’ for provisioning and deploying multi-region services using GitLab features, and the lessons learned migrating to multi-region in production.\n\n## Background on the project\n\nRunway is GitLab’s internal platform as a service (PaaS) for provisioning, deploying, and operating containerized services. Runway's purpose is to enable GitLab service owners to self-serve infrastructure needs with production readiness out of the box, so application developers can focus on providing value to customers. 
As part of [our corporate value of dogfooding](https://handbook.gitlab.com/handbook/values/#results), the first iteration was built in 2023 by the Infrastructure department on top of core GitLab capabilities, such as continuous integration/continuous delivery ([CI/CD](https://about.gitlab.com/topics/ci-cd/)), environments, and deployments.\n\nBy establishing automated GitOps best practices, Runway services use infrastructure as code (IaC), merge requests (MRs), and CI/CD by default.\n\nGitLab Duo is primarily powered by [AI Gateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist), a satellite service written in Python outside of GitLab’s modular monolith written in Ruby. In cloud computing, a region is a geographical location of data centers operated by cloud providers.\n\n## Defining a multi-region strategy\n\nDeploying in a single region is a good starting point for most services, but can come with downsides when you are trying to reach a global audience. Users who are geographically far from where your service is deployed may experience different levels of service and responsiveness than those who are closer. This can lead to a poor user experience, even if your service is well built in all other respects.\n\nFor AI Gateway, it was important to meet global customers wherever they are located, whether on GitLab.com or self-managed instances using Cloud Connector. When a developer is deciding to accept or reject a code suggestion, milliseconds matter and can define the user experience.\n\n### Goals\n\nMulti-region deployments require more infrastructure complexity, but for use cases where latency is a core component of the user experience, the benefits often outweigh the downsides. First, multi-region deployments offer increased responsiveness to the user. By serving requests from locations closest to end users, latency can be significantly reduced. Second, multi-region deployments provide greater availability. 
With fault tolerance, services can fail over during a regional outage. There is a much lower chance of a service failing completely, meaning users should not be interrupted even in partial failures.\n\nBased on our goals for performance and availability, we used this opportunity to create a scalable multi-region strategy in Runway, which is built leveraging GitLab features.\n\n### Architecture\n\nIn SaaS platforms, GitLab.com’s infrastructure is hosted on Google Cloud Platform (GCP). As a result, Runway’s first supported platform runtime is Cloud Run. The initial workloads deployed on Runway are stateless satellite services (e.g., AI Gateway), so Cloud Run services are a good fit that provide a clear migration path to more complex and flexible platform runtimes, e.g. Kubernetes.\n\nBuilding Runway on top of GCP Cloud Run using GitLab has allowed us to iterate and tease out the right level of abstractions for service owners as part of a platform play in the Infrastructure department.\n\nTo serve traffic from multiple regions in Cloud Run, the multi-region deployment strategy must support global load balancing, and the provisioning and configuration of regional resources. Here’s a simplified diagram of the proposed architecture in GCP:\n\n![simplified diagram of the proposed architecture in GCP](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098671/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098671612.png)\n\nBy replicating Cloud Run services across multiple regions and configuring the existing global load balancing with serverless network endpoint group (NEG) backends, we’re able to serve traffic from multiple regions. 
For the remainder of the article, we’ll focus less on specifics of Cloud Run and more on how we’re building with GitLab.\n\n## Building a multi-region platform with GitLab\n\nNow that you have context about Runway, let's walk through how to build a multi-region platform using GitLab features.\n\n### Provision\n\nWhen building an internal platform, the first challenge is provisioning infrastructure for a service. In Runway, Provisioner is the component that is responsible for maintaining a service inventory and managing IaC for GCP resources using Terraform.\n\nTo provision a service, an application developer will open an MR to add a service project to the inventory using git, and Provisioner will create required resources, such as service accounts and identity and access management policies. When building this functionality with GitLab, Runway leverages [OpenID Connect (OIDC) with GPC Workload Identity Federation](https://docs.gitlab.com/ee/ci/cloud\\_services/google\\_cloud/) for managing IaC.\n\nAdditionally, Provisioner will create a deployment project for each service project. The purpose of creating separate projects for deployments is to ensure the [principle of least privilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/) by authenticating as a GCP service account with restricted permissions. Runway leverages the [Projects API](https://docs.gitlab.com/ee/api/projects.html) for creating projects with [Terraform provider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs).\n\nFinally, Provisioner defines variables in the deployment project for the service account, so that deployment CI jobs can authenticate to GCP. 
Runway leverages [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and [Job Token allowlist](https://docs.gitlab.com/ee/ci/jobs/ci\\_job\\_token.html\\#add-a-group-or-project-to-the-job-token-allowlist) to handle authentication and authorization.\n\nHere’s a simplified example of provisioning a multi-region service in the service inventory:\n\n```\n{\n  \"inventory\": [\n    {\n      \"name\": \"example-service\",\n      \"project_id\": 46267196,\n      \"regions\": [\n        \"europe-west1\",\n        \"us-east1\",\n        \"us-west1\"\n      ]\n    }\n  ]\n}\n```\n\nOnce provisioned, a deployment project and necessary infrastructure will be created for a service.\n\n### Configure\n\nAfter a service is provisioned, the next challenge is the configuration for a service. In Runway, [Reconciler](https://gitlab.com/gitlab-com/gl-infra/platform/runway/runwayctl) is a component that is responsible for configuring and deploying services by aligning the actual state with the desired state using Golang and Terraform.\n\nHere’s a simplified example of an application developer configuring GitLab CI/CD in their service project:\n\n```\n# .gitlab-ci.yml\nstages:\n  - validate\n  - runway_staging\n  - runway_production\n\ninclude:\n  - project: 'gitlab-com/gl-infra/platform/runway/runwayctl'\n    file: 'ci-tasks/service-project/runway.yml'\n    inputs:\n      runway_service_id: example-service\n      image: \"$CI_REGISTRY_IMAGE/${CI_PROJECT_NAME}:${CI_COMMIT_SHORT_SHA}\"\n      runway_version: v3.22.0\n\n# omitted for brevity\n```\n\nRunway provides sane default values for configuration that are based on our experience in delivering stable and reliable features to customers. Additionally, service owners can configure infrastructure using a service manifest file hosted in a service project. The service manifest uses JSON Schema for validation. 
When building this functionality with GitLab, Runway leverages [Pages](https://docs.gitlab.com/ee/user/project/pages/) for schema documentation.\n\nTo deliver this part of the platform, Runway leverages [CI/CD templates](https://docs.gitlab.com/ee/development/cicd/templates.html), [Releases](https://docs.gitlab.com/ee/user/project/releases/), and [Container Registry](https://docs.gitlab.com/ee/user/packages/container\\_registry/) for integrating with service projects.\n\nHere’s a simplified example of a service manifest:\n\n```\n# .runway/runway-production.yml\napiVersion: runway/v1\nkind: RunwayService\nspec:\n container_port: 8181\n regions:\n   - us-east1\n   - us-west1\n   - europe-west1\n\n# omitted for brevity\n```\n\nFor multi-region services, Runway injects an environment variable into the container instance runtime, e.g. RUNWAY\\_REGION, so application developers have the context to make any downstream dependencies regionally-aware, e.g. Vertex AI API.\n\nOnce configured, a service project will be integrated with a deployment project.\n\n### Deploy\n\nAfter a service project is configured, the next challenge is deploying a service. In Runway, Reconciler handles this by triggering a deployment job in the deployment project when an MR is merged to the main branch. When building this functionality with GitLab, Runway leverages [Trigger Pipelines](https://docs.gitlab.com/ee/ci/triggers/) and [Multi-Project Pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream\\_pipelines.html\\#multi-project-pipelines) to trigger jobs from service project to deployment project.\n\n![trigger jobs from service project to deployment project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098671612.png)\n\nOnce a pipeline is running in a deployment project, it will be deployed to an environment. By default, Runway will provision staging and production environments for all services. 
At this point, Reconciler will apply any Terraform resource changes for infrastructure. When building this functionality with GitLab, Runway leverages [Environments/Deployments](https://docs.gitlab.com/ee/ci/environments/) and [GitLab-managed Terraform state](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform\\_state.html) for each service.\n\n![Reconciler applies any Terraform resource changes for infrastructure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098671614.png)\n\nRunway provides default application metrics for services. Additionally, custom metrics can be used by enabling a sidecar container with OpenTelemetry Collector configured to scrape Prometheus and remote write to Mimir. By providing observability out of the box, Runway is able to bake monitoring into CI/CD pipelines.\n\nExample scenarios include gradual rollouts for blue/green deployments, preventing promotions to production when staging is broken, or automatically rolling back to previous revision when elevated error rates occur in production.\n\n![Runway bakes monitoring into CI/CD pipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098671615.png)\n\nOnce deployed, environments will serve the latest revision of a service. At this point, you should have a good understanding of some of the challenges that will be encountered, and how to solve them with GitLab features.\n\n## Migrating to multi-region in production\n\nAfter extending Runway components to support multi-region in Cloud Run, the final challenge was migrating from AI Gateway’s single-region deployment in production with zero downtime. Today, teams using Runway to deploy their services can self-serve on regions making a multi-region deployment just as simple as a single-region deployment. 
\n\nWe were able to iterate on building multi-region functionality without impacting existing infrastructure by using semantic versioning for Runway. Next, we’ll share some learnings from the migration that may inform how to operate services for an internal multi-region platform.\n\n### Dry run deployments\n\nIn Runway, Reconciler will apply Terraform changes in CI/CD. The trade-off is that plans cannot be verified in advance, which could risk inadvertently destroying or misconfiguring production infrastructure. To solve this problem, Runway will perform a “dry run” deployment for MRs.\n\n![\"Dry run\" deployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098671616.png)\n\nFor migrating AI Gateway, dry run deployments increased confidence and helped mitigate risk of downtime during rollout. When building an internal platform with GitLab, we recommend supporting dry run deployments from the start.\n\n### Regional observability\n\nIn Runway, existing observability was aggregated by assuming a single-region deployment. To solve this problem, Runway observability was retrofitted to include a new region label for Prometheus metrics.\n\nOnce metrics were retrofitted, we were able to introduce service level indicators (SLIs) for both regional Cloud Run services and global load balancing. Here’s an example dashboard screenshot for a general Runway service:\n\n![dashboard screenshot for a general Runway service](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098671617.png)\n\n***Note:** Data is not actual production data and is only for illustration purposes.*\n\nAdditionally, we were able to update our service level objectives (SLOs) to support regions. 
As a result, service owners could be alerted when a specific region experiences an elevated error rate, or increase in response times.\n\n![screenshot of alerts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098671617.png)\n\n***Note:** Data is not actual production data and is only for illustration purposes.*\n\nFor migrating AI Gateway, regional observability increased confidence and helped provide more visibility into new infrastructure. When building an internal platform with GitLab, we recommend supporting regional observability from the start.\n\n### Self-service regions\n\nThe Infrastructure department successfully performed the initial migration of multi-region support for AI Gateway in production with zero downtime. Given the risk associated with rolling out a large infrastructure migration, it was important to ensure the service continued working as expected.\n\nShortly afterwards, service owners began self-serving additional regions to meet the growth of customers. At the time of writing, [GitLab Duo](https://about.gitlab.com/gitlab-duo/) is available in six regions around the globe and counting. Service owners are able to configure the desired regions, and Runway will provide guardrails along the way in a scalable solution.\n\nAdditionally, three other internal services have already started using multi-region functionality on Runway. Application developers have entirely self-served functionality, which validates that we’ve provided a good platform experience for service owners. For a platform play, a scalable solution like Runway is considered a good outcome since the Infrastructure department is no longer a blocker.\n\n## What’s next for Runway\n\nBased on how quickly we could iterate to provide results for customers, the SaaS Platforms department has continued to invest in Runway. 
We’ve grown the Runway team with additional contributors, started evolving the platform runtime (e.g. Google Kubernetes Engine), and continue dogfooding with tighter integration in the product.\n\nIf you’re interested in learning more, feel free to check out [https://gitlab.com/gitlab-com/gl-infra/platform/runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway).\n\n## More Building GitLab with GitLab\n- [Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n- [Stress-testing Product Analytics](https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics/)\n- [Web API Fuzz Testing](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n- [How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n- [Expanding our security certification portfolio](https://about.gitlab.com/blog/building-gitlab-with-gitlab-expanding-our-security-certification-portfolio/)\n","Engineering",[109,1293,1090,9,1294,1295,1296,1297,1298,1299],"CD","tutorial","performance","google","git","DevSecOps","AI/ML",{"slug":1301,"featured":91,"template":680},"building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","content:en-us:blog:building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","Building Gitlab With Gitlab A Multi Region Service To Deliver Ai 
Features","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"_path":1307,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1308,"content":1314,"config":1321,"_id":1323,"_type":14,"title":1324,"_source":16,"_file":1325,"_stem":1326,"_extension":19},"/en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow",{"title":1309,"description":1310,"ogTitle":1309,"ogDescription":1310,"noIndex":6,"ogImage":1311,"ogUrl":1312,"ogSiteName":667,"ogType":668,"canonicalUrls":1312,"schema":1313},"Building GitLab with GitLab: Web API Fuzz Testing","Our new series shows how we dogfood new DevSecOps platform features to ready them for you. First up, security testing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659740/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: Web API Fuzz Testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Eddington\"},{\"@type\":\"Person\",\"name\":\"Eugene Lim\"}],\n        \"datePublished\": \"2023-05-09\",\n      }",{"title":1309,"description":1310,"authors":1315,"heroImage":1311,"date":1318,"body":1319,"category":743,"tags":1320},[1316,1317],"Mike Eddington","Eugene Lim","2023-05-09","\n\nAt GitLab, we try to [dogfood everything](/handbook/product/product-processes/#dogfood-everything) to help us better understand the product, pain points, and configuration issues. We use what we learn to build a more efficient, feature-rich platform and user experience. In this first installment of our “Building GitLab with GitLab” series, we will focus on security testing. 
We constantly strive to improve our security testing coverage and integrate it into our DevSecOps lifecycle. These considerations formed the motivation for the API fuzzing dogfooding project at GitLab. By sharing our lessons from building this workflow, we hope other teams can also learn how to integrate GitLab’s Web API Fuzz Testing and solve some common challenges.\n\n## What is Web API Fuzz Testing?\n\nWeb API Fuzz Testing involves generating and sending various unexpected input parameters to a web API in an attempt to trigger unexpected behavior and errors in the API backend. By analyzing these errors, you can discover bugs and potential security issues missed by other scanners that focus on specific vulnerabilities. GitLab's Web API Fuzz Testing complements and should be run in addition to GitLab Secure’s other security scanners such as static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) and dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)) APIs.\n\n## Auto-generating an OpenAPI specification\nTo run the Web API Fuzzing Analyzer, you need one of the following:\n* OpenAPI Specification - Version 2 or 3\n* GraphQL Schema\n* HTTP Archive (HAR)\n* Postman Collection - Version 2.0 or 2.1\n\nAt the start of the API fuzzing project, the [API Vision working group](/company/team/structure/working-groups/api-vision/) was also working on an issue to automatically document [GitLab’s REST API endpoints in an OpenAPI specification](https://gitlab.com/groups/gitlab-org/-/epics/8636), so we worked with our colleague Andy Soiron on implementing it. 
Because GitLab uses the [grape](https://github.com/ruby-grape/grape) API framework, Andy had already identified and [tested](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/95877) the [grape-swagger](https://github.com/ruby-grape/grape-swagger) gem that auto-generates an OpenAPI v2 specification based on existing grape annotations. For example, the following API endpoint code:\n\n```\n     Class.new(Grape::API) do\n       format :json\n       desc 'This gets something.'\n       get '/something' do\n         { bla: 'something' }\n       end\n       add_swagger_documentation\n     end\n``` \nWill be parsed by grape-swagger into:\n\n```\n{\n  // rest of OpenAPI v2 specification\n  …\n  \"paths\": {\n    \"/something\": {\n      \"get\": {\n        \"description\": \"This gets something.\",\n        \"produces\": [\n          \"application/json\"\n        ],\n        \"operationId\": \"getSomething\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"This gets something.\"\n          }\n        }\n      }\n    }\n  }\n}\n```\n\n\nHowever, with almost 2,000 API operations with different requirements and formats, a lot of additional work needed to be done to resolve edge cases that did not meet the requirements of grape-swagger or the OpenAPI format. For example, one simple case was API endpoints that accept file parameters, such as the [upload metric image endpoint](https://docs.gitlab.com/ee/api/issues.html#upload-metric-image). GitLab uses the [Workhorse](https://gitlab.com/gitlab-org/gitlab/tree/master/workhorse) smart reverse proxy to handle \"large\" HTTP requests such as file uploads. 
As such, file parameters must be of the type WorkhorseFile:\n\n\n```\nnamespace ':id/issues/:issue_iid/metric_images' do\n            …\n            desc 'Upload a metric image for an issue' do\n              success Entities::IssuableMetricImage\n            end\n            params do\n              requires :file, type: ::API::Validations::Types::WorkhorseFile, desc: 'The image file to be uploaded'\n              optional :url, type: String, desc: 'The url to view more metric info'\n              optional :url_text, type: String, desc: 'A description of the image or URL'\n            end\n            post do\n              require_gitlab_workhorse!\n```\n\nBecause grape-swagger does not recognize what OpenAPI type WorkhorseFile corresponds to, it excludes the parameter from its output. We fixed this by adding a grape-swagger-specific documentation to override the type during generation:\n\n```\n             requires :file, type: ::API::Validations::Types::WorkhorseFile, desc: 'The image file to be uploaded', documentation: { type: 'file' }\n```\n\nHowever, not all edge cases could be resolved with a simple match-and-replace in the grape annotations. For example, Ruby on Rails supports wildcard segment parameters. A route like `get 'books/*section/:title'` would match`books/some/section/last-words-a-memoir`. In addition, the URI would be parsed such that the `section` path parameter would have the value `some/section` and the `title` path parameter would have the value `last-words-a-memoir`.\n\nCurrently, grape-swagger does not recognize these wildcard segments as path parameters. 
For example, the route would generate:\n\n```\n\"paths\": {\n  \"/api/v2/books/*section/{title}\": {\n    \"get\": {\n    ...\n      \"parameters\": [\n         {\n           \"in\": \"query\", \"name\": \"*section\"\n           ...\n  }\n}\n```\n\nInstead of the expected:\n\n```\n\"paths\": {\n  \"/api/v2/books/{section}/{title}\": {\n    \"get\": {\n    ...\n      \"parameters\": [\n         {\n           \"in\": \"path\", \"name\": \"section\"\n           ...\n  }\n}\n```\n\nAs such, we also needed to make several patches to grape-swagger, which we forked while waiting for the changes to be accepted upstream. Nevertheless, with lots of careful checking and cooperation across teams, we managed to get the OpenAPI specification generated for most of the endpoints.\n\n## Performance tuning\n\nWith the OpenAPI specification, we could now begin with the API fuzzing. GitLab already uses the [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) feature to generate testing environments for some feature changes, providing a readily available fuzzing target. However, given the large number of endpoints, it would be impossible to expect a standard shared runner to complete fuzzing in a single job. The Web API Fuzz Testing documentation includes a [performance tuning section](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/#performance-tuning-and-testing-speed) that recommends the following:\n\n* using a multi-CPU Runner\n* excluding slow operations\n* splitting a test into multiple jobs\n* excluding operations in feature branches, but not default branch\n\nThe first recommendation was easy to implement with a dedicated fuzzing runner. We recommend doing this for large scheduled fuzzing workflows, especially if you select the Long-100 fuzzing profile. We also began excluding slow operations by checking the job logs for the time taken to complete each operation. 
Along the way, we identified other endpoints that needed to be excluded, such as the [revoke token endpoint](https://docs.gitlab.com/ee/api/personal_access_tokens.html#revoke-a-personal-access-token) that prematurely ended the fuzzing session.\n\nSplitting the test into multiple jobs took the most effort due to the requirements of the OpenAPI format. Each OpenAPI document includes a required set of objects and fields, so it is not simply a matter of splitting after a fixed number of lines. Additionally, each operation relies on entities defined in the definitions object, so we needed to ensure that when splitting the OpenAPI specification, the entities required by the endpoints were included. We also wrote a quick script to fill the example parameter data with actual data from the testing environment, such as project IDs.\n\nWhile it was possible to run these scripts locally, then push the split jobs and OpenAPI specifications to the repository, this created a large number of changes every time we updated the original OpenAPI specification. Instead, we adapted the workflow to use dynamically generated child pipelines that would split the OpenAPI document in a CI job, then generate a child pipeline with jobs for each split document. This made iterating a lot easier and more agile. We have uploaded [the scripts and pipeline configuration](https://gitlab.com/eugene_lim/api-fuzzing-dogfooding) for reference.\n\nBy tweaking the number of parallel jobs and fuzzing profile, we were eventually able to achieve a reasonably comprehensive fuzzing session in an acceptable time frame. When tuning your own fuzzing workflow, balancing these trade-offs is essential.\n\n## Triaging the API fuzzing findings\n\nWith the fuzzing done, we were now confronted with hundreds of findings. Unlike DAST analyzers that try to detect specific vulnerabilities, Web API Fuzz Testing looks for unexpected behavior and errors that may not necessarily be vulnerabilities. 
This is why fuzzing faults discovered by the API Fuzzing Analyzer show up as vulnerabilities with a severity of “Unknown.” This requires more involved triaging.\n\nFortunately, the Web API fuzzer also outputs Postman collections as artifacts in the Vulnerability Report page. These collections allow you to quickly repeat requests that triggered a fault during fuzzing. For this stage of the fuzzing workflow, we recommend that you set up a local instance of the application so that you can easily check logs and debug specific faults. In this case, we ran the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit).\n\nMany of the faults occurred due to a lack of error handling for unexpected inputs. We created issues from the Vulnerability Report page, and if we found that a particular fault had the same root cause as a previously triaged fault, we linked the vulnerability to the original issue instead.\n\n## Lessons learned\n\nThe API fuzzing dogfooding project turned out to be a fruitful exercise that benefited other workstreams at GitLab, such as the API documentation project. In addition, tuning and triaging helped us identify key pain points in the process for improvement. Automated API documentation generation is difficult even with OpenAPI, particularly on a long-lived codebase. GitLab’s existing annotations and tests helped speed up documentation via a distributed, asynchronous workflow across multiple teams. In addition, many GitLab features such as Review Apps, Vulnerability Reports, and dynamically generated child pipelines helped us build a robust fuzzing workflow.\n\nThere are still many improvements that can be made to the workflow. Moving to OpenAPI v3 could improve endpoint coverage. The Secure team also wrote a [HAR Recorder](https://gitlab.com/gitlab-org/security-products/har-recorder) tool that could help generate HAR files on the fly instead of relying on static documentation. 
For now, due to the high compute cost of fuzzing thousands of operations in GitLab’s API, the workflow is better suited to a scheduled pipeline instead of GitLab’s core pipeline.\n\nFor teams that have already implemented several layers of static and dynamic checks and want to take further steps to increase coverage, we recommend trying a Web API fuzzing exercise as a way to validate assumptions and discover “unknown unknowns” in your code.\n\nWe encourage you to get familiar with API fuzzing and let us know how it works for you. If you face any issues or have any feedback, please file an issue at the [issue tracker on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/issues/). Use the `~\"Category:API Security\"` label when opening a new issue regarding API fuzzing to ensure it is quickly reviewed by the appropriate team members.\n",[9,720,9,722,1294],{"slug":1322,"featured":6,"template":680},"building-gitlab-with-gitlab-api-fuzzing-workflow","content:en-us:blog:building-gitlab-with-gitlab-api-fuzzing-workflow.yml","Building Gitlab With Gitlab Api Fuzzing Workflow","en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow.yml","en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow",{"_path":1328,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1329,"content":1334,"config":1343,"_id":1345,"_type":14,"title":1346,"_source":16,"_file":1347,"_stem":1348,"_extension":19},"/en-us/blog/building-gitlab-with-gitlab-stress-testing-product-analytics",{"title":1330,"description":1331,"ogTitle":1330,"ogDescription":1331,"noIndex":6,"ogImage":1311,"ogUrl":1332,"ogSiteName":667,"ogType":668,"canonicalUrls":1332,"schema":1333},"Building GitLab with GitLab: Stress-testing Product Analytics","We put Product Analytics through its paces internally to prep it for Beta. 
Find out what that entailed and how it led to feature improvements.","https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: Stress-testing Product Analytics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"James Heimbuck\"},{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2023-12-14\",\n      }",{"title":1330,"description":1331,"authors":1335,"heroImage":1311,"date":1338,"body":1339,"category":1340,"tags":1341},[1336,1337],"James Heimbuck","Sam Kerr","2023-12-14","To best understand how your features being developed and shipped are helping you meet your goals, you need data. The previously announced [Product Analytics feature set](https://about.gitlab.com/blog/introducing-product-analytics-in-gitlab/) helps our customers do just that by providing tools to instrument code and process and visualize the data – all within GitLab.\n\n## Privacy first\n\nWe know customer privacy is a big concern for our customers and our customer's customers. As we said in our [announcement blog](https://about.gitlab.com/blog/introducing-product-analytics-in-gitlab/#our-continued-commitment-to-user-privacy):\n\n\u003Cp>\u003Ccenter>\"Product Analytics is designed to honor commonly recognized opt-out signals and we are designing Product Analytics to give you full control over the data being collected on a cluster managed by GitLab or your own.\"\u003C/center>\u003C/p>\n\nNothing about that approach has changed and it is too important not to mention again.\n\n## Customer Zero and the biggest customer\n\nWe are progressing quickly towards the open beta for Product Analytics. 
We are currently feature-complete for the beta with the managed product analytics stack, [five existing SDKs for instrumentation](https://docs.gitlab.com/ee/user/product_analytics/#instrument-a-gitlab-project), [default dashboards](https://docs.gitlab.com/ee/user/analytics/analytics_dashboards.html#product-analytics), and the recently released  improved Dashboard and Visualization Designer experiences. We are also learning more about what problems our internal users still have that they cannot solve with Product Analytics.\n\nAs we prepare for the Beta release of Product Analytics, it is important for us to know how the Managed Product Analytics stack will stand up to a bigger event load than we are getting from the initial customers and internal users. With our commitment to dogfooding, adding more internal projects was the obvious answer, so we worked with more internal teams to add instrumentation for the Metrics Dictionary and [GitLab Design System](https://design.gitlab.com/) sites.\n\nInstrumenting internal projects gave us additional feedback about the setup of Product Analytics and the usefulness of the Audience and Behavior Dashboards, showing how many users were visiting and what pages they visited. These gave us great insights into the usefulness of Product Analytics, but did not provide the volume of events we needed to really stress test Product Analytics at the scale we wanted. \n\n![product-analytics-default-dashboard-list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683252/Blog/Content%20Images/product-analytics-default-dashboard-list.png)\n\nAt the same time the Analytics Instrumentation team was hard at work developing an event framework to make instrumentation easier for GitLab developers. This lets the GitLab teams create new features and update existing ones faster to understand how changes impact our users. 
This also made it much easier and faster to add Product Analytics to GitLab.com, which provided the event volume that would stress test the Product Analytics stack so we could validate our assumptions.\n\nOnce fully enabled, with all page views and events going to the Managed Product Analytics stack, we saw a 17x increase in load above all other internally instrumented projects, receiving over 20 million events a day. That is a lot of events!\n\nBy instrumenting GitLab.com, we were able to see the stress cracks in our infrastructure _before_ introducing the features to users in our Beta. We were able to validate our scaling strategies, identify and resolve query performance concerns, improve the onboarding experience for our upcoming Beta program, and plan future improvements as we work towards [general availability](https://gitlab.com/groups/gitlab-org/-/epics/9902).\n\nWe have also proved to ourselves that Product Analytics can stand up to future customer load without making customers suffer through outages or slowness as we make the stack better.\n\n## What’s next for Product Analytics\n\nThroughout the internal release and the experiment phase, we have been talking to customers about what is and is not working with Product Analytics, especially the [built-in dashboards](https://docs.gitlab.com/ee/user/analytics/analytics_dashboards.html#product-analytics). From that feedback we have a number of improvements in mind that can't all fit here but check out our [Product Analytics direction page](https://about.gitlab.com/direction/monitor/product-analytics/#what-is-next-for-us-and-why) to see the latest on what improvements are coming next.\n\nTalking directly with users of Product Analytics is also informing the next iterations of other features like [Customizable Dashboards](https://gitlab.com/groups/gitlab-org/-/epics/8574) and [Visualization Designer](https://gitlab.com/groups/gitlab-org/-/epics/9386). 
The team is also exploring ways to [leverage AI](https://gitlab.com/groups/gitlab-org/-/epics/10335) to make it easier to find and understand Product Analytics data. \n\n## Share your feedback\n\nIt is an exciting time in product analytics and we cannot wait for you to try the feature out yourself! You can add ideas or comments to our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/391970). We look forward to hearing from you!\n\n## Read more \"Building GitLab with GitLab\"\n\n- [Building GitLab with GitLab: How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n- [Building GitLab with GitLab: Web API Fuzz Testing](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n","devsecops",[1298,1342,1295,9],"product",{"slug":1344,"featured":91,"template":680},"building-gitlab-with-gitlab-stress-testing-product-analytics","content:en-us:blog:building-gitlab-with-gitlab-stress-testing-product-analytics.yml","Building Gitlab With Gitlab Stress Testing Product Analytics","en-us/blog/building-gitlab-with-gitlab-stress-testing-product-analytics.yml","en-us/blog/building-gitlab-with-gitlab-stress-testing-product-analytics",{"_path":1350,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1351,"content":1357,"config":1363,"_id":1365,"_type":14,"title":1366,"_source":16,"_file":1367,"_stem":1368,"_extension":19},"/en-us/blog/choosing-a-compliance-framework",{"title":1352,"description":1353,"ogTitle":1352,"ogDescription":1353,"noIndex":6,"ogImage":1354,"ogUrl":1355,"ogSiteName":667,"ogType":668,"canonicalUrls":1355,"schema":1356},"How GitLab went about choosing the right compliance framework","Independent vs aggregate? 
Determining the most effective security controls approach for any organization has many considerations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680591/Blog/Hero%20Images/compliance-frameworks.jpg","https://about.gitlab.com/blog/choosing-a-compliance-framework","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab went about choosing the right compliance framework\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeff Burrows\"}],\n        \"datePublished\": \"2019-05-07\",\n      }",{"title":1352,"description":1353,"authors":1358,"heroImage":1354,"date":1360,"body":1361,"category":720,"tags":1362},[1359],"Jeff Burrows","2019-05-07","\n\nIn most cases, information security compliance is a notoriously difficult area for smaller companies to get started with. Generally, when a company is large enough to have compliance needs, that company has already established a lot of its operating processes and configured the infrastructure.\n\nIn GitLab's case, we started our formalized compliance program towards the end of our [Series C funding round](/blog/gitlab-raises-20-million-to-complete-devops/), which is actually earlier than a lot of companies our size. This timing afforded GitLab a terrific opportunity to build out our compliance program. 
We were able to take a step back and consider the most efficient use of our personnel without an immediate need for external compliance certifications.\n\n## Defining security controls: Independent or aggregate?\n\nWhen it was time to identify security controls that would match up with processes and structure, we were faced with the decision a lot of small companies encounter: Do we treat each information security framework we have an interest in – or need for – independently, or do we try and aggregate these controls in a way that gives us natural alignment to underlying frameworks?\n\nBy interacting with industry frameworks (e.g. [ISO](https://www.iso.org/home.html), [SOC](https://www.aicpa.org/interestareas/frc/assuranceadvisoryservices/sorhome.html), [PCI](https://www.pcisecuritystandards.org/), etc.) individually we would have clarity with each individual control in terms of scope and applicability. But we would have been reaching out to our internal teams with hundreds of individual controls, many of which overlap. An example of this overlap is that PCI DSS V3.2.1, SOC2 Common Controls, and ISO 27001 all require business continuity plans. With an individualized approach to security frameworks, we would be treating each business continuity plan separately and would run the risk of making multiple requests to GitLab teams in order to satisfy all requirements.\n\nBy adopting an “umbrella framework” approach and leveraging an open source option (i.e. [Adobe’s CCF](https://blogs.adobe.com/security/2017/05/open-source-ccf.html)), we’ve been able to build in efficiency and ensure that when we interact with our internal teams, we are not requesting the same information in multiple formats. In the above PCI DSS V3.2.1, SOC2 Common Controls, and ISO 27001 example, choosing an umbrella framework means evaluating all the individual requirements collectively and creating a control statement that fulfills the needs of each of the controls simultaneously. 
This creates an overarching security control that allows us to make a single request for business continuity information to each GitLab team and eliminates having to collect slightly different information depending on the framework we are working with at any given time. By being thoughtful about what is asked for, the compliance group gains internal credibility. The more agile and efficient we can enable our teams to be, the more productive GitLab becomes.\n\n## The GitLab approach\n\nWe’ve already begun adapting Adobe's framework to satisfy our own needs. This unified framework approach has allowed us to quickly create security controls and start building out the supporting guidance and policy information. And we’ve been able to stand up a comprehensive compliance program – in months, not years.\n\nAs we spend more time customizing the Adobe CCF open source framework and aligning the compliance process to the GitLab product workflow, we plan to share what we’ve created and what we’ve learned along the way through a series of blog posts. We’ll also make some of these resources available to our customers in the hopes that it can help other organizations jump start their own compliance journeys.\n\nDo you have thoughts on the approach GitLab is taking with our compliance framework adoption?  Or maybe you have feedback on particular compliance needs you’d like to see GitLab address going forward? 
Share your thoughts with us below; we’d love to hear from you!\n\nCover image by [Erik Witsoe](https://unsplash.com/@ewitsoe) on [Unsplash](https://unsplash.com/photos/mODxn7mOzms)\n{: .note}\n",[677,9,720],{"slug":1364,"featured":6,"template":680},"choosing-a-compliance-framework","content:en-us:blog:choosing-a-compliance-framework.yml","Choosing A Compliance Framework","en-us/blog/choosing-a-compliance-framework.yml","en-us/blog/choosing-a-compliance-framework",{"_path":1370,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1371,"content":1377,"config":1382,"_id":1384,"_type":14,"title":1385,"_source":16,"_file":1386,"_stem":1387,"_extension":19},"/en-us/blog/cofounder-relations",{"title":1372,"description":1373,"ogTitle":1372,"ogDescription":1373,"noIndex":6,"ogImage":1374,"ogUrl":1375,"ogSiteName":667,"ogType":668,"canonicalUrls":1375,"schema":1376},"The secret to an enduring co-founder relationship? Have those crucial conversations","Our CEO sits down with leadership psychologist Banu Hantal to discuss his relationship with GitLab co-founder Dmitriy Zaporozhets.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680642/Blog/Hero%20Images/cofounders_phone.jpg","https://about.gitlab.com/blog/cofounder-relations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The secret to an enduring co-founder relationship? 
Have those crucial conversations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-06-21\",\n      }",{"title":1372,"description":1373,"authors":1378,"heroImage":1374,"date":1379,"body":1380,"category":299,"tags":1381},[672],"2019-06-21","\n\nIn the latest installment of our [Pick Your Brain](/blog/tags.html#pick-your-brain) series, leadership psychologist [Banu Hantal](https://www.banuhantal.com/) interviews our CEO [Sid Sijbrandij](/company/team/#sytses) about his relationship with co-founder and engineering fellow [Dmitriy Zaporozhets](/company/team/#dzaporozhets). In their discussion, Sid shares GitLab’s origin story and talks about how transparent communication with Dmitriy helps keep their partnership strong.\n\n## The beginning of GitLab\n\nDmitriy and Sid’s partnership started in the same place as most modern-day relationships: online. Dmitriy started GitLab while he was working elsewhere, and within a year of GitLab’s launch, 300 people had contributed code.\n\nSid saw that GitLab had potential as a service and started GitLab.com independently of Dmitriy. Sid didn’t need Dmitriy’s permission to do this, because [GitLab was (and partially remains) open source](/blog/gitlab-is-open-core-github-is-closed-source/), but reached out to Dmitriy to let him know about the next iteration of the project. Dmitriy was gracious and celebrated the fact that Sid was expanding the impact of GitLab.\n\nFor about a year, Sid invested in building GitLab.com while also working as a consultant until Dmitriy posted a tweet saying, “I want to work on GitLab full time.” That tweet changed GitLab’s story.\n\n“It was quite unusual to post that to the entire world. He was employed and everything,” says Sid. 
“I emailed Dmitriy and I said ‘Hey, I saw your tweet, how much do you want to earn to start working on GitLab?’”\n\nBy this time, there were a few big companies that were using GitLab.com and they were asking Sid to add new features to the product. Once Dmitriy came on board, it was possible to build those features quicker.\n\n“I went to the local Western Union money office, and when I said I wanted to wire money from the Netherlands to the Ukraine, they were like, ‘Do you know this person or is this someone you met over the internet?’”\n\n“You didn’t even know what Dmitriy looked like?!” exclaims Banu.\n\n“At that point my mental image of Dmitriy was like a pink mob boss because that was his avatar,” says Sid, but that didn’t last for long. They finally met in person in Krakow shortly after making plans to commit their efforts to GitLab full time.\n\n## Communication makes for happy co-founders\n\n\"Do you think having a mostly remote relationship is an advantage or disadvantage?\" asks Banu.\n\n\"I don't think it matters that much,\" says Sid. \"I think you do the same things, and you've got to make sure there's regular communication. To this day we have a call every single week. When there’s something important he gets a heads up so he doesn’t feel misinformed.\"\n\nIf there is an issue that is clearly contentious, Sid says, they put all the information on the table and discuss the problem directly.\n\n“I think surprises are really bad. You want to make sure if there’s something important that you get a heads up, and that there is a regular cadence of communication.”\n\nThough Dmitriy and Sid rarely get the chance to interact in person today, there is very little conflict in their relationship.\n\n## What to do when one co-founder is the CEO\n\n“What you don’t want is the [Peter Principle](https://en.wikipedia.org/wiki/Peter_principle), where the only way for an engineer to advance is to become a manager,” says Sid. 
“And then, oftentimes, you lose a great engineer and get a bad manager.”\n\nSo, they elected to structure GitLab the company so there are more leadership opportunities for engineers by offering a dual-career track. While Sid is co-founder and CEO of the company, Dmitriy is a co-founder and engineering fellow. A fellowship offers a path to advancement for engineers that does not involve people management.\n\nThough GitLab was first built as an alternative to GitHub, it has since expanded its technical capabilities ten-fold, explains Sid. In fact, it was Dmitriy that first built the [CI solution](/features/continuous-integration/) and continuous testing framework which is a core component to our product today.\n\n“I was like, he can do whatever he wants – he’s a co-founder and so far his hunches pay off. At a certain point someone contributed to that and then they joined the company and said, 'Let’s integrate the two products.' First Dmitriy told him he was wrong, and then together they came to me and I told them they were wrong, and we ended up doing it and it was the best thing that ever happened to GitLab.”\n\n“How would you describe your relationship with Dmitriy?” asks Banu.\n\nThere are three dimensions to the partnership between Sid and Dmitriy. They are co-founders, there is a hierarchical relationship with Sid as CEO, and of course, a friendship.\n\n“I think it’s frequently better to fall in love with each other’s work and then build a relationship based on that, rather than fall in love with the person and then try to build a business,” says Sid. 
“Friendships based on business tend to last longer than businesses based on friendships.”\n\nWatch the full conversation between Sid and Banu here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gpQKtSKMzkI?start=6\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [Pavan Trikutam](https://unsplash.com/@ptrikutam) on [Unsplash](https://unsplash.com/photos/71CjSSB83Wo)\n{: .note}\n",[9,745,811],{"slug":1383,"featured":6,"template":680},"cofounder-relations","content:en-us:blog:cofounder-relations.yml","Cofounder Relations","en-us/blog/cofounder-relations.yml","en-us/blog/cofounder-relations",{"_path":1389,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1390,"content":1395,"config":1401,"_id":1403,"_type":14,"title":1404,"_source":16,"_file":1405,"_stem":1406,"_extension":19},"/en-us/blog/collaborating-on-a-cross-stage-feature",{"title":1391,"description":1392,"ogTitle":1391,"ogDescription":1392,"noIndex":6,"ogImage":800,"ogUrl":1393,"ogSiteName":667,"ogType":668,"canonicalUrls":1393,"schema":1394},"How we tested a feature that affected (almost) all parts of GitLab","Crowd-sourcing testing across teams","https://about.gitlab.com/blog/collaborating-on-a-cross-stage-feature","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we tested a feature that affected (almost) all parts of GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aakriti Gupta\"}],\n        \"datePublished\": \"2021-03-17\",\n      }",{"title":1391,"description":1392,"authors":1396,"heroImage":800,"date":1398,"body":1399,"category":698,"tags":1400},[1397],"Aakriti Gupta","2021-03-17","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIn 13.9 Team Geo [released Maintenance Mode](https://about.gitlab.com/releases/2021/02/22/gitlab-13-9-released/#maintenance-mode), 
which was a large, cross stage and cross team project, a few milestones in the making.\n\nThis feature allows system administrators to put GitLab in a read-only mode. All parts of the system are affected and testing such a wide scope was challenging.\n\n## Why was testing this feature hard?\n\nAs we started testing with the QA team, it was clear that no one individual or team could know enough about the entire product to design a comprehensive QA plan. The more we tested, the more features we found to test - it was soon becoming an impossibly long list of tests to write for our small team.\n\nWe needed to prioritize manually testing the most important features, and save working on automated tests for another iteration.\n\nBut, what were the most important things to test?\n\nThis is where we decided to crowd-source testing. [We rolled-out discussion issues](https://gitlab.com/dashboard/issues?scope=all&utf8=%E2%9C%93&state=closed&author_username=aakriti.gupta&search=crowd-sourced+maintenance+mode+testing) to each of the 13 stages and asked them to contribute the three most important features that they own, that we should prioritise testing.\n\nWe used these issues to share knowledge of maintenance mode, and responsibility of its development, testing and documentation.\n\nThe response was overwhelming!\n\nProduct managers and engineers from across the development department contributed to our list of tests and collaboratively reviewed and improved documentation. They proactively asked how their features would behave and in some cases, even started MRs to fix the documentation.\n\nThe conversations helped us hone our plan for future iterations of this feature.\n\n## What we learned\n1\\. **Test iteratively and collaboratively**\n\nGet QA and developer teams working together early, instead of after development is almost done, or worse - after release. 
GitLab's [Quad planning](https://about.gitlab.com/handbook/engineering/quality/quality-engineering/quad-planning/) process was introduced last year to foster better collaboration between Quality, Development, UX, and Product teams. As [Jennie from QA](https://gitlab.com/jennielouie) chalked out a plan for QA together with developers, she found a few edge cases that would have otherwise been discovered too late.\n\n2\\. **Don’t hesitate to ask other teams to contribute**\n\nWhen we rolled out a dozen plus issues to all development teams, we were not sure if we’d get even a few responses, but we were overwhelmed with the interest, response and active participation that came from all the teams.\n\n3\\. **Communicate well**\n\nGive people enough and succinct information. When requesting help from other teams, help them prioritize the request by explaining the why.\n\n4\\. **Documentation as a form of developer communication**\n\nAs we worked through large documentation MRs, I realized the documentation was not only important for system administrators, but for developers of GitLab as well. Developers wanted to know how maintenance mode affected their features.\n\n5\\. **Iterate**\n\nKeep the discussions short-lived and focused on the most important aspects. Do not draw out the conversations too long, and move pending conversations over to follow-up issues.\nAs we learned of new test cases, [Nick from QA](https://gitlab.com/nwestbury) and I created follow-up test issues to resolve together with DRIs.\n\n6\\. 
**The more, the merrier**\n\nWhile the discussions started only with Engineering Managers and Product Managers, they often invited engineers into their conversations and this brought more eyes to the project and helped us answer a lot of unknowns.\n",[811,9,832,722,723],{"slug":1402,"featured":6,"template":680},"collaborating-on-a-cross-stage-feature","content:en-us:blog:collaborating-on-a-cross-stage-feature.yml","Collaborating On A Cross Stage Feature","en-us/blog/collaborating-on-a-cross-stage-feature.yml","en-us/blog/collaborating-on-a-cross-stage-feature",{"_path":1408,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1409,"content":1415,"config":1421,"_id":1423,"_type":14,"title":1424,"_source":16,"_file":1425,"_stem":1426,"_extension":19},"/en-us/blog/compose-readers-and-writers-in-golang-applications",{"title":1410,"description":1411,"ogTitle":1410,"ogDescription":1411,"noIndex":6,"ogImage":1412,"ogUrl":1413,"ogSiteName":667,"ogType":668,"canonicalUrls":1413,"schema":1414},"Compose Readers and Writers in Golang applications","GitLab streams terabytes of Git data every hour using Golang abstractions of I/O implementations. 
Learn how to compose Readers and Writers in Golang apps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099464/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_639935439_3oqldo5Yt5wPonEJYZOLTM_1750099464124.jpg","https://about.gitlab.com/blog/compose-readers-and-writers-in-golang-applications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Compose Readers and Writers in Golang applications\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Igor Drozdov\"}],\n        \"datePublished\": \"2024-02-15\",\n      }",{"title":1410,"description":1411,"authors":1416,"heroImage":1412,"date":1418,"body":1419,"category":1291,"tags":1420},[1417],"Igor Drozdov","2024-02-15","Every hour, GitLab transfers terabytes of Git data between a server and a client. It is hard or even impossible to handle this amount of traffic unless it is done efficiently in a streaming fashion. Git data is served by Gitaly (Git server), GitLab Shell (Git via SSH), and Workhorse (Git via HTTP(S)). These services are implemented using Go - the language that conveniently provides abstractions to efficiently deal with I/O operations.\n\nGolang's [`io`](https://pkg.go.dev/io) package provides [`Reader`](https://pkg.go.dev/io#Reader) and [`Writer`](https://pkg.go.dev/io#Writer) interfaces to abstract the functionality of I/O implementations into public interfaces.\n\n`Reader` is the interface that wraps the basic `Read` method:\n\n```go\ntype Reader interface {\n\tRead(p []byte) (n int, err error)\n}\n```\n\n`Writer` is the interface that wraps the basic `Write` method.\n\n```go\ntype Writer interface {\n\tWrite(p []byte) (n int, err error)\n}\n```\n\nFor example, [`os`](https://pkg.go.dev/os) package provides an implementation of reading a file. 
`File` type implements `Reader` and `Writer` interfaces by defining basic [`Read`](https://pkg.go.dev/os#File.Read) and [`Write`](https://pkg.go.dev/os#File.Write) functions.\n\nIn this blog post, you'll learn how to compose Readers and Writers in Golang applications.\n\nFirst, let's read from a file and write its content to [`os.Stdout`](https://cs.opensource.google/go/go/+/master:src/os/file.go;l=66?q=Stdout&ss=go%2Fgo).\n\n```go\nfunc main() {\n\tfile, err := os.Open(\"data.txt\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tp := make([]byte, 32 * 1024)\n\tfor {\n\t\tn, err := file.Read(p)\n\n\t\t_, errW := os.Stdout.Write(p[:n])\n\t\tif errW != nil {\n\t\t\tlog.Fatal(errW)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n```\n\nEach call of the `Read` function fills the buffer `p` with the content from the file, i.e. the file is being consumed in chunks (up to `32KB`) instead of being fully loaded into the memory.\n\nTo simplify this widely used pattern, `io` package conveniently provides [`Copy`](https://pkg.go.dev/io#Copy) function that allows passing content from any `Reader` to any `Writer` and also [handles](https://cs.opensource.google/go/go/+/refs/tags/go1.21.0:src/io/io.go;l=433) additional edge cases.\n\n```go\nfunc main() {\n\tfile, err := os.Open(\"data.txt\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tif _, err := io.Copy(os.Stdout, file); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n```\n\n`Reader` and `Writer` interfaces are used across the whole Golang ecosystem because they facilitate reading and writing content in a streaming fashion. Therefore, gluing together the Readers and Writers with the functions that expect these interfaces as arguments is a frequent problem to solve. 
Sometimes it's as straightforward as passing content from a Reader into a Writer, but sometimes the content written into a Writer must be represented as a Reader or the content from a reader must be sent into multiple Writers. Let's have a closer look into different use cases and the examples of solving these types of problems in the `GitLab` codebase.\n\n## Reader -> Writer\n\n**Problem**\n\nWe need to pass content from a Reader into a Writer.\n\n![readers and writers - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099494917.png)\n\n**Solution**\n\nThe problem can be solved by using [`io.Copy`](https://pkg.go.dev/io#Copy).\n\n```go\nfunc Copy(dst Writer, src Reader) (written int64, err error)\n```\n\n**Example**\n\n[`InfoRefs*`](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/gitaly/smarthttp.go#L18-35) Gitaly RPCs return a `Reader` and we want to [stream](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/git/info-refs.go#L78-80) its content to a user via HTTP response:\n\n```go\nfunc handleGetInfoRefsWithGitaly(ctx context.Context, responseWriter *HttpResponseWriter, a *api.Response, rpc, gitProtocol, encoding string) error {\n        ...\n        infoRefsResponseReader, err := smarthttp.InfoRefsResponseReader(ctx, &a.Repository, rpc, gitConfigOptions(a), gitProtocol)\n        ...\n        if _, err = io.Copy(w, infoRefsResponseReader); err != nil {\n            return err\n        }\n        ...\n}\n```\n\n## Reader -> Multiple Writers\n\n**Problem**\n\nWe need to pass content from a Reader into multiple Writers.\n\n![readers and writers - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099494917.png)\n\n**Solution**\n\nThe `io` package provides 
[`io.MultiWriter`](https://pkg.go.dev/io#MultiWriter) function that _converts_ multiple Writers into a single one. When its `Write` function is called, the content is copied to all the Writers ([implementation](https://cs.opensource.google/go/go/+/refs/tags/go1.21.0:src/io/multi.go;l=127)).\n\n```go\nfunc MultiWriter(writers ...Writer) Writer\n```\n\n**Example**\n\nGiven we want to [build](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L13-18) `md5`, `sha1`, `sha256` and `sha512` hashes from the same content. [`Hash`](https://pkg.go.dev/hash#Hash) type is a `Writer`. Using `io.MultiWriter`, we define [`multiHash`](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L43-61) Writer. After the content is [written](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/destination.go#L124-125) to the `multiHash`, we [calculate](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L63-70) the hashes of all these functions in a single run.\n\nThe simplified version of the example is:\n\n```go\npackage main\n\nimport (\n\t\"crypto/sha1\"\n\t\"crypto/sha256\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n)\n\nfunc main() {\n\ts1 := sha1.New()\n\ts256 := sha256.New()\n\n\tw := io.MultiWriter(s1, s256)\n\tif _, err := w.Write([]byte(\"content\")); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(s1.Sum(nil))\n\tfmt.Println(s256.Sum(nil))\n}\n```\n\nFor simplicity, we just call `Write` function on a Writer, but when content comes from a Reader, then `io.Copy` can be used as well:\n\n```go\n_, err := io.Copy(io.MultiWriter(s1, s256), reader)\n```\n\n## Multiple Readers -> Reader\n\n**Problem**\n\nWe have multiple Readers and need to sequentially read from 
them.\n\n![readers and writers - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099494919.png)\n\n**Solution**\n\nThe `io` package provides [`io.MultiReader`](https://pkg.go.dev/io#MultiReader) function that _converts_ multiple Readers into a single one. The Readers are read in the passed order.\n\n```go\nfunc MultiReader(readers ...Reader) Reader\n```\n\nThen this Reader can be used in any function that accepts `Reader` as an argument.\n\n**Example**\n\nWorkhorse [reads](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/cmd/gitlab-resize-image/png/reader.go#L26-38) the first `N` bytes of an image to detect whether it's a PNG file and _puts them back_ by building a Reader from multiple Readers:\n\n```go\nfunc NewReader(r io.Reader) (io.Reader, error) {\n\tmagicBytes, err := readMagic(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif string(magicBytes) != pngMagic {\n\t\tdebug(\"Not a PNG - read file unchanged\")\n\t\treturn io.MultiReader(bytes.NewReader(magicBytes), r), nil\n\t}\n\n\treturn io.MultiReader(bytes.NewReader(magicBytes), &Reader{underlying: r}), nil\n}\n```\n\n## Multiple Readers -> Multiple Writers\n\n**Problem**\n\nWe need to pass content from multiple Readers into multiple Writers.\n\n![readers and writers - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099494921.png)\n\n**Solution**\n\nThe solutions above can be generalized on the many-to-many use case.\n\n```go\n_, err := io.Copy(io.MultiWriter(w1, w2, w3), io.MultiReader(r1, r2, r3))\n```\n\n## Reader -> Reader + Writer\n\n**Problem**\n\nWe need to read content from a Reader or pass the Reader to a function and simultaneously write the content into a Writer.\n\n![readers and writers - image 
2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099494923.png)\n\n**Solution**\n\nThe `io` package provides [io.TeeReader](https://pkg.go.dev/io#TeeReader) function that accepts a Reader to read from, a Writer to write to, and returns a Reader that can be processed further.\n\n```go\nfunc TeeReader(r Reader, w Writer) Reader\n```\n\nThe [implementation](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/io/io.go;l=610) of the functionality is straightforward. The passed `Reader` and `Writer` are stored in a structure that is a `Reader` itself:\n\n```go\nfunc TeeReader(r Reader, w Writer) Reader {\n\treturn &teeReader{r, w}\n}\n\ntype teeReader struct {\n\tr Reader\n\tw Writer\n}\n```\n\nThe `Read` function implemented for the structure delegates the `Read` to the passed `Reader` and also performs a `Write` to the passed `Writer`:\n\n```\nfunc (t *teeReader) Read(p []byte) (n int, err error) {\n\tn, err = t.r.Read(p)\n\tif n > 0 {\n\t\tif n, err := t.w.Write(p[:n]); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\treturn\n}\n```\n\n**Example 1**\n\nWe already touched hashing topic in the `Multiple Writers -> Writer` section and `io.TeeReader` is [used](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/upload/destination/destination.go#L124-125) to provide a Writer to create a hash from content. The returned Reader can be further used to upload content to object storage.\n\n**Example 2**\n\nWorkhorse uses `io.TeeReader` to [implement](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/dependencyproxy/dependencyproxy.go#L57-101) Dependency Proxy [functionality](https://docs.gitlab.com/ee/user/packages/dependency_proxy/). Dependency Proxy caches requested upstream images in the object storage. 
The not-yet-cached use case has the following behavior:\n\n- A user performs an HTTP request.\n- The upstream image is fetched using [`net/http`](https://pkg.go.dev/net/http) and [`http.Response`](https://pkg.go.dev/net/http#Response) provides its content via `Body` field, which is [`io.ReadCloser`](https://pkg.go.dev/io#ReadCloser) (basically an `io.Reader`).\n- We need to send this content back to the user by writing it into [`http.ResponseWriter`](https://pkg.go.dev/net/http#ResponseWriter) (basically an `io.Writer`).\n- We need to simultaneously upload the content to object storage by performing an [`http.Request`](https://pkg.go.dev/net/http#NewRequest) (a function that accepts an `io.Reader`).\n\nAs a result, `io.TeeReader` can be used to glue these primitives together:\n\n```go\nfunc (p *Injector) Inject(w http.ResponseWriter, r *http.Request, sendData string) {\n\t// Fetch upstream data via HTTP\n\tdependencyResponse, err := p.fetchUrl(r.Context(), sendData)\n\t...\n\t// Create a tee reader. Each Read will read from dependencyResponse.Body and simultaneously\n        // perform a Write to w writer\n\tteeReader := io.TeeReader(dependencyResponse.Body, w)\n\t// Pass the tee reader as the body of an HTTP request to upload it to object storage\n\tsaveFileRequest, err := http.NewRequestWithContext(r.Context(), \"POST\", r.URL.String()+\"/upload\", teeReader)\n\t...\n\tnrw := &nullResponseWriter{header: make(http.Header)}\n\tp.uploadHandler.ServeHTTP(nrw, saveFileRequest)\n\t...\n```\n\n## Writer -> Reader\n\n**Problem**\n\nWe have a function that accepts a Writer, and we are interested in the content that the function would write into the Writer. 
We want to intercept the content and represent it as a Reader to further process it in a streaming fashion.\n\n![readers and writers - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099494924.png)\n\n**Solution**\n\nThe `io` package provides [`io.Pipe`](https://pkg.go.dev/io#Pipe) function that returns a Reader and a Writer:\n\n```go\nfunc Pipe() (*PipeReader, *PipeWriter)\n```\n\nThe Writer can be used to be passed to the function that accepts a Writer. All the content that has been written into it will be accessible via the reader, i.e. a synchronous in-memory pipe is created that can be used to connect code expecting an `io.Reader` with code expecting an `io.Writer`.\n\n**Example 1**\n\nFor [LSIF](https://lsif.dev/) file [transformation](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L68-72) for code navigation we need to:\n\n- [Read](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L48-51) content of a zip file.\n- Transform the content and [serialize](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/docs.go#L97-112) it into [`zip.Writer`](https://pkg.go.dev/archive/zip#Writer).\n- [Represent](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L68-72) the new compressed content as a Reader to be further processed in a streaming fashion.\n\nThe [`zip.NewWriter`](https://pkg.go.dev/archive/zip#NewWriter) function accepts a Writer to which it will write the compressed content. It is handy when we need to pass an open file descriptor to the function to save the content to the file. 
However, when we need to pass the compressed content via an HTTP request, we need to represent the data as a Reader.\n\n```go\n// The `io.Pipe()` creates a reader and a writer.\npr, pw := io.Pipe()\n\n// The writer is passed to `parser.transform` function which will write\n// the transformed compressed content into it\n// The writing should happen asynchronously in a goroutine because each `Write` to\n// the `PipeWriter` blocks until it has satisfied one or more `Read`s from the `PipeReader`.\ngo parser.transform(pw)\n\n// Everything that has been written into it is now accessible via the reader.\nparser := &Parser{\n\tDocs: docs,\n\tpr:   pr,\n}\n\n// pr is a reader that can be used to read all the data written to the pw writer\nreturn parser, nil\n```\n\n**Example 2**\n\nFor Geo setups [GitLab Shell](https://gitlab.com/gitlab-org/gitlab-shell) proxies all `git push` operations to secondary and redirects them to primary.\n\n- GitLab Shell establishes an SSH connection and defines [`ReadWriter`](https://gitlab.com/gitlab-org/gitlab-shell/blob/7898d8e69daf51a7b6e01052c4516ca70893a2d4/internal/command/readwriter/readwriter.go#L6-7) struct that has `In` field of `io.Reader` type to read data from a user and `Out` field of `io.Writer` type to send response to the user.\n- GitLab Shell performs an HTTP request to `/info/refs` and sends `response.Body` of type `io.Reader` to the user using [`io.Copy`](https://gitlab.com/gitlab-org/gitlab-shell/blob/7898d8e69daf51a7b6e01052c4516ca70893a2d4/internal/command/githttp/push.go#L60)\n- The user reacts to this response by sending data to `In` and GitLab Shell needs to read this data, convert it to a request expected by Git HTTP, and send it as an HTTP request to `/git-receive-pack`. 
This is where `io.Pipe` becomes useful.\n\n```go\nfunc (c *PushCommand) requestReceivePack(ctx context.Context, client *git.Client) error {\n\t// Define pipeReader and pipeWriter and use pipeWriter to collect all the data\n\t//sent by the user converted to a format expected by Git HTTP.\n\tpipeReader, pipeWriter := io.Pipe()\n\t// The writing happens asynchronously because it's a blocking operation\n\tgo c.readFromStdin(pipeWriter)\n\n\t// pipeReader can be passed as io.Reader and used to read all the data written to pipeWriter\n\tresponse, err := client.ReceivePack(ctx, pipeReader)\n\t...\n\t_, err = io.Copy(c.ReadWriter.Out, response.Body)\n\t...\n}\n\nfunc (c *PushCommand) readFromStdin(pw *io.PipeWriter) {\n\tvar needsPackData bool\n\n\t// Scanner reads the user input line by line\n\tscanner := pktline.NewScanner(c.ReadWriter.In)\n\tfor scanner.Scan() {\n\t\tline := scanner.Bytes()\n\t\t// And writes it to the pipe writer\n\t\tpw.Write(line)\n\t\t...\n\t}\n\n\t// The data that hasn't been processed by a scanner is copied if necessary\n\tif needsPackData {\n\t\tio.Copy(pw, c.ReadWriter.In)\n\t}\n\n\t// Close the pipe writer to signify EOF for the pipe reader\n\tpw.Close()\n}\n```\n\n## Try Golang\n\nGolang provides elegant patterns designed to efficiently process data in a streaming fashion. 
The patterns can be used to address new challenges or refactor the existing performance issues associated with high memory consumption.\n\n> Learn more about [GitLab and Golang](https://docs.gitlab.com/ee/development/go_guide/).\n",[1294,1297,9,1295],{"slug":1422,"featured":6,"template":680},"compose-readers-and-writers-in-golang-applications","content:en-us:blog:compose-readers-and-writers-in-golang-applications.yml","Compose Readers And Writers In Golang Applications","en-us/blog/compose-readers-and-writers-in-golang-applications.yml","en-us/blog/compose-readers-and-writers-in-golang-applications",{"_path":1428,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1429,"content":1435,"config":1441,"_id":1443,"_type":14,"title":1444,"_source":16,"_file":1445,"_stem":1446,"_extension":19},"/en-us/blog/concurrent-devops",{"title":1430,"description":1431,"ogTitle":1430,"ogDescription":1431,"noIndex":6,"ogImage":1432,"ogUrl":1433,"ogSiteName":667,"ogType":668,"canonicalUrls":1433,"schema":1434},"Making the case for \"concurrent DevOps\"","DevOps goes by a lot of different names, but we’ve settled on concurrent DevOps for now at least.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663648/Blog/Hero%20Images/gitlab-joins-cd-foundation.jpg","https://about.gitlab.com/blog/concurrent-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making the case for \\\"concurrent DevOps\\\"\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-07-17\"\n      }",{"title":1430,"description":1431,"authors":1436,"heroImage":1432,"date":1437,"body":1438,"category":787,"tags":1439},[869],"2019-07-17","\nWhat’s in a name? Quite a lot, apparently, when it comes to the software development space. 
Over the last few years companies have come up with a number of different names to describe their DevOps efforts – BizDevOps, DevSecOps, and even “modern software development.” But here at GitLab we prefer the term “[concurrent DevOps](/topics/devops/).”\n\nTo explain the thought process behind our choice of concurrent DevOps and what it all might mean moving forward, GitLab CEO [Sid Sijbrandij](/company/team/#sytses) sat down with chief marketing officer [Todd Barr](/company/team/#tbarr) and corporate marketing senior director [Melissa Smolensky](/company/team/#melsmo). It’s safe to say [a healthy discussion ensued](https://www.youtube.com/watch?v=bDTYHGEIeM0).\n\n## Why “concurrent”?\n\n“In GitLab you’re not passing (code) along multiple stages,” explains Sid. “You don’t wait until something is ready and then send it off to some security testing. People can work in parallel. We call it concurrent because it can be parallel but it doesn't have to be.\"\n\nAnd concurrent DevOps stands out from what Sid calls “sequential DevOps.” Because no one is waiting for a handoff, or permission, everything goes faster, Sid offers. “I think concurrent DevOps could be a rallying cry,” he says. “If we can spread that idea, make it bigger than GitLab, it’s going to be easier for people to demand something like that and trust (us) with other solutions.”\n\n## Start with a mission (statement)\n\nBut Todd needs convincing that concurrent DevOps is the right term. “Concurrent DevOps isn’t really a category, it’s a benefit statement,” he says. He suggests a different approach, using our mission statement [“everyone can contribute”](/company/mission/#mission) as a starting point. 
“I think that has a lot of legs if we actually put more thought into what that means and what category that would mean if we’re creating a platform where everyone can contribute.”\n\n> Concurrent DevOps could be a rallying cry if we can spread that idea – make it bigger than GitLab\n\nSid agrees, in theory, that GitLab is creating a broader platform but doesn’t think the time is right, yet, to make that our main marketing message. “Yes, our visions are bigger. But if you’re too far ahead of where people think you are, you might fall flat on your face. If we can own DevOps I’d settle for that for the next few years.” Melissa agrees, pointing to the fact that enterprises still have a long way to go to integrate DevOps into their development lifecycles.\n\n## Size matters\n\nAnd there’s no question the DevOps market is sufficiently large to support GitLab’s growth, Sid says, referring to a report from Grand View Research that forecasts the market will be worth [nearly $13 billion in 2025](https://www.grandviewresearch.com/press-release/global-development-to-operations-devops-market). So the market opportunity is there, Todd agrees, and offers that both he and Melissa have been in the DevOps space so long they’ve sort of taken it for granted, which is why he suggested different terminology. “DevOps has become a term that's almost synonymous with future software lifecycle development,” he says. “But there's a people element that we've got to help people understand. With concurrent DevOps we're trying to be more inclusive in the process, or that's at least one benefit.”\n\nWe need to make the case that concurrent DevOps is better, Sid stresses, even if we eventually change the name later on. 
“Our big benefit is a single application for the entire DevOps lifecycle.”\n\nWatch the entire video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/bDTYHGEIeM0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [YIFEI CHEN](https://unsplash.com/photos/FPMRxKd7MxI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/spiral-lights?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1440,9],"DevOps",{"slug":1442,"featured":6,"template":680},"concurrent-devops","content:en-us:blog:concurrent-devops.yml","Concurrent Devops","en-us/blog/concurrent-devops.yml","en-us/blog/concurrent-devops",{"_path":1448,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1449,"content":1455,"config":1461,"_id":1463,"_type":14,"title":1464,"_source":16,"_file":1465,"_stem":1466,"_extension":19},"/en-us/blog/conducting-remote-ux-research",{"title":1450,"description":1451,"ogTitle":1450,"ogDescription":1451,"noIndex":6,"ogImage":1452,"ogUrl":1453,"ogSiteName":667,"ogType":668,"canonicalUrls":1453,"schema":1454},"Conducting remote UX research at GitLab","Learn about the different kinds of UX research we conduct at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666775/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/conducting-remote-ux-research","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Conducting remote UX research at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah O’Donnell\"}],\n        \"datePublished\": \"2017-12-20\"\n      }",{"title":1450,"description":1451,"authors":1456,"heroImage":1452,"date":1458,"body":1459,"category":743,"tags":1460},[1457],"Sarah O’Donnell","2017-12-20","GitLab is a 
[remote-only](http://www.remoteonly.org/) organization and just like our [team](/company/team/), our users are spread across the globe. Conducting remote UX research allows us to quickly connect with GitLab users anywhere in the world. It provides us with the opportunity to gather insight into users’ behaviors, motivations and goals when using GitLab. This helps us to determine what features should be built and how they should behave. But how do we do all this remotely?\n\n\u003C!-- more -->\n\nThese are some of the remote UX research methods we use at GitLab.\n\n## Card sorting\n\nCard sorting is a research method for discovering how people understand and categorize information. Each card represents an item or a topic and we ask users to group the cards in a way that makes sense to them. We may also ask them to help us label these groups.\n\nCard sorting can be used to:\n\n- Help design the information architecture of your application\n- Establish what information should be on a page and in what order that information should appear\n- Provide a ranking for items or topics based on a set criteria\n\nWhen analyzing a card sort, we look for common patterns such as which cards appear together the most and which cards are labeled in a similar way.\n\nAt GitLab, we’re currently using card sorting to restructure the sidebar navigation at a project and group level. We want to understand how you, our users, would expect our features to be grouped and classified. Our aim is to improve the ease and the speed at which you navigate around GitLab. We conduct remote card sorting via [Optimal Workshop](https://www.optimalworkshop.com/).\n\n## First-click testing\n\nFirst-click testing explores what users click on first when completing a task within an interface. It tells us whether users are able to find what they’re looking for quickly and easily. 
This research method is based on the principle that users are two to three times more likely to find what they are looking for if their initial click is correct, rather than a click in the wrong direction.\n\nWe’ve used first-click testing at GitLab to quickly evaluate multiple design ideas against one another. We share our designs with users via [UsabilityHub](https://usabilityhub.com/). We measure whether users take the correct path and how long it takes them to decide where to click. A slower click time would suggest a user has hesitated about where to click.\n\nFirst-click testing is great for providing an indication of whether a design is intuitive to users and helps us to quickly narrow down multiple design concepts.\n\n## Surveys\n\nSurveys are used to investigate the opinions or experiences of users by asking them questions through an online form. A survey invites people to share open and honest feedback. Some people find them less intimidating than other forms of research as there is the option to remain anonymous when providing answers. They also allow us to track how the attitudes and behaviors of our users change over time.\n\nWe’ve used surveys to understand our users and form [personas](https://design.gitlab.com/), to generate new ideas for future GitLab improvements and to help measure users’ satisfaction with our existing features.\n\n## User interviews\n\nIf you take part in a user interview at GitLab, you’ll usually be speaking one on one with a UX researcher. In order to do this, you’ll need a desktop or laptop computer and a headset with a microphone.\n\nWe find that most of our users like to talk with us on their lunch break at their work station, whether situated at home or in an office. We love this, as it provides some insight into the environment in which you use GitLab.\n\nOften our interviews are focused on you! We’ll ask you to chat about things such as your background, occupation and experience with GitLab. 
Sometimes we might have a particular topic we’d like to discuss, such as how you’ve incorporated GitLab into your workflow. We’ll always tell you our intentions ahead of the call so you have time to think about what you’d like to contribute to the discussion. We also welcome you to share your screen with us during the call. We understand that it is sometimes easier to show and demonstrate something than it is to just talk about it!\n\nWe’ve used feedback from user interviews to:\n\n- Inform our [personas](https://docs.gitlab.com/ee/development/ux_guide/users.html)\n- Follow up on survey answers\n- Understand and develop objectives and goals for features\n\n## Usability testing\n\nUsability testing is a technique used to evaluate a product by testing it with representative users. Usability testing can be divided into two categories: moderated and unmoderated research.\n\n**Moderated**\n\nIf you participate in moderated usability testing at GitLab, you’ll complete a series of tasks whilst being observed by one of our UX researchers. In order to see what you're doing, we'll ask you to share your screen with us. We use [Zoom](https://zoom.us/) to run our moderated usability testing sessions.\n\nAs you use GitLab, we’ll ask you to try and think out loud: tell us what you’re looking at, what you’re trying to do and what you’re thinking. We’re interested in hearing your honest feedback. Sound scary? It really isn’t! It’s important to remember that we’re testing GitLab, not you. You can’t say or do anything wrong during a study.\n\nModerated research allows for conversation between a user and the UX researcher, because both are online simultaneously. It gives the researcher the opportunity to ask a user follow-up questions regarding something they’ve said or done. Subsequently, moderated research provides us with a lot of in-depth qualitative research about our users’ needs. 
It can help us to uncover usability problems that we weren’t aware of and to generate solutions to solve these problems.\n\n**Unmoderated**\n\nUnlike moderated research, unmoderated research doesn't involve any conversation between a user and a UX researcher. Instead, unmoderated usability testing sessions are completed alone by a user. As users can complete sessions at their own convenience and studies can be run simultaneously, they're good for collecting data quickly.\n\nWe use [Validately](https://validately.com/) to serve the tasks to you and to record your actions. We then analyze the data collected asynchronously. It is, however, still very helpful to us if you try and think out loud while you’re completing tasks.\n\nUnmoderated research can provide some qualitative data. However, as there’s no opportunity to ask users follow-up questions related to their actions, the study should focus on a few specific elements or relatively minor changes. Unmoderated research is usually better at addressing specific quantitative questions, such as:\n\n- What percentage of users successfully completed the task?\n- How long did it take users to complete the task?\n\nAs a researcher cannot view an unmoderated usability testing session until it's completed, there's a risk of a study being unusable if the user didn't complete the tasks as specified or if they ran into technical difficulties.\n\nWe conduct both moderated and unmoderated usability testing sessions at GitLab to test new features and changes to existing features.\n\n## How can I get involved?\n\nWe’re always looking for people to participate in our research, whether you're a GitLab user or not. You can get involved by signing up for [GitLab First Look](/community/gitlab-first-look/), a comprehensive research program that will help us ship the features and fixes you need to do your best work.  
Besides being instrumental in shaping the future of GitLab, you’ll have the opportunity to earn gift cards and win awesome tech prizes by sharing your feedback with us.\n",[832,9,700],{"slug":1462,"featured":6,"template":680},"conducting-remote-ux-research","content:en-us:blog:conducting-remote-ux-research.yml","Conducting Remote Ux Research","en-us/blog/conducting-remote-ux-research.yml","en-us/blog/conducting-remote-ux-research",{"_path":1468,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1469,"content":1475,"config":1480,"_id":1482,"_type":14,"title":1483,"_source":16,"_file":1484,"_stem":1485,"_extension":19},"/en-us/blog/considering-a-career-in-security",{"title":1470,"description":1471,"ogTitle":1470,"ogDescription":1471,"noIndex":6,"ogImage":1472,"ogUrl":1473,"ogSiteName":667,"ogType":668,"canonicalUrls":1473,"schema":1474},"Considering a career in security? Here’s some advice.","Eight team members from our Security department talk about what they've learned working in Tech and what advice they’d offer to someone considering a career in security.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670859/Blog/Hero%20Images/woctech-photo7.jpg","https://about.gitlab.com/blog/considering-a-career-in-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Considering a career in security? Here’s some advice.\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2021-03-04\",\n      }",{"title":1470,"description":1471,"authors":1476,"heroImage":1472,"date":1477,"body":1478,"category":698,"tags":1479},[1010],"2021-03-04","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nThis is post 3 of a 3 part series profiling several women in GitLab’s security organization.  
See part [one](/blog/breaking-into-security/) and [two](/blog/whats-it-like-to-work-security-at-gitlab/).\n{: .note}\n\n_Breaking into technology, and security, can be difficult for anyone. At GitLab [31% of our workforce identifies as women](/handbook/people-group/people-success-performance-indicators/#diversity---women-at-gitlab). In our security department we have ten team members who are women out of a total of 48 team members; that’s 21%.  Global women in tech numbers are around 21.4% according to [CNET](https://www.cnet.com/news/microsofts-first-in-depth-diversity-report-shows-progress-remains-slow/) and this recent study, [“Resetting Tech Culture”](https://www.accenture.com/us-en/blogs/accenture-research/why-tech-is-losing-women-just-when-we-need-them-the-most) indicates that young women who go into tech drop out by the age of 35.  How do we change this?  GitLab is looking to help there through our [outbound hiring model](/handbook/hiring/candidate/faq/), [tracking and working toward key metrics](/handbook/people-group/people-success-performance-indicators/#diversity---women-in-management), [inclusion training](/company/culture/inclusion/#diversity-inclusion--belonging-training-and-learning-opportunities), [team member resource groups](/company/culture/inclusion/erg-guide/#how-to-join-current-tmrgs-and-their-slack-channels), Engineering department-based developmental and networking groups (like our [Women in Security group](/handbook/security/women-in-security.html)), building and fostering an [inclusive remote culture](/company/culture/inclusion/building-diversity-and-inclusion/) and [mentorship programs](/company/culture/inclusion/erg-minorities-in-tech/mentoring/)._\n\nThrough this series, we’ve discussed the [different paths our team members have taken to get into security and tech](/blog/breaking-into-security/), the actual [projects and initiatives they’ve developed, managed and/or implemented at 
GitLab](/blog/whats-it-like-to-work-security-at-gitlab/), as part of our security team, and their advice to others looking to break into security and take on similar roles.\n\nIn this last blog in our three part series, our team members talk about how they stay motivated and engaged to take on that next challenge, and each one offers up a bit of advice or learnings across different areas like:\n* How to embrace risk taking\n* Starting your career off right in security\n* Learning from the past\n* Whether (or not) you should apply to roles where you may not meet 100% of the qualifications (we’ll cut to the chase on this one ⇒ **YES, do it!**)\n\n\n---\n\n\n### [Julia Lake](/company/team/#julia.lake) - [Director, Security Risk and Compliance](https://handbook.gitlab.com/job-families/security/security-leadership/)\nJoined GitLab April 2020 / Connect with Julia on [LinkedIn](https://www.linkedin.com/in/julia-lake-16843740/)\n\n![Your image alt text](https://about.gitlab.com/images/blogimages/security-career-advice/jlake_blog3.png){: .shadow.small.left.wrap-text}\n\n**What is the most interesting thing you’ve learned about security thus far?**\nThat security, and specifically security compliance, is a business enabler and not a business inhibitor. This is true across the board, but especially true for SaaS providers where customers are trusting us with their highly sensitive data. Implementing strong security practices enables all other aspects of the business to grow. The biggest opportunity security leaders have is making this value proposition clear to the rest of the organization.\n\n**What advice do you have to embrace risk-taking?**\nFrom a business perspective, it's important to determine the [risk appetite and risk tolerance](https://www.isaca.org/resources/news-and-trends/newsletters/atisaca/2020/volume-8/tips-for-setting-or-evaluating-risk-appetite) of the organizational leaders and align your operations accordingly. 
Risk appetite and tolerance can change as organizations grow and mature, so I recommend measuring both on a minimum of an annual basis.\n\nFrom a personal perspective, I always try to operate with a higher risk appetite, which to me means saying yes to new projects and opportunities - especially those I’m uncomfortable with. This allows me to continue to grow my professional skill set. You never have to be a perfect fit for a new role, but you do have to have the capability and experience to be able to execute on the strategic objectives of that role. I highly recommend this TEDtalk about [taking small risks to increase your luck](https://www.ted.com/talks/tina_seelig_the_little_risks_you_can_take_to_increase_your_luck).\n\n\n### [Jennifer Blanco](/company/team/#jblanco2) - Sr. Risk and Field Security Engineer\nJoined GitLab June 2019 / Connect with Jennifer on [LinkedIn](https://www.linkedin.com/in/jenniferblanco1/)\n\n![Your image alt text](https://about.gitlab.com/images/blogimages/security-career-advice/jblanco_blog3.png){: .shadow.small.right.wrap-text}\n\n**What is the most interesting thing you’ve learned about security and tech thus far?**\nUnderstanding the power (and danger) of data. I had exposure to aspects of consumer law during my days as a paralegal, but working in security has put data security, and my understanding of it, in a whole new light. Data can easily be collected through our everyday smart devices, and many companies are harvesting this information. The best advice I share with people who want to increase their awareness is to employ a general and healthy skepticism of companies; specifically around ways they can impose on privacy. 
Once you have the details, you can make an informed decision by looking at the costs and benefits carefully so that you can feel confident about your choices.\n\n**Was there ever a role you applied for and landed, but weren't 100% qualified to do?**\nMy first job in Security Compliance was the largest leap for me because I had to learn about software models in addition to the technology industry as a whole. It was exciting but also overwhelming because there was so much to understand and the information was not always easy to glean. I invested a lot of study time and immersion into compliance frameworks, as well as in-person training courses, including a hands-on penetration workshop. It took two years before I was confident that I had the whole picture; though this changes with the industry landscape as there are so many aspects that can affect our line of work. Learning how to work with git and remote repositories at GitLab was the next biggest challenge and the one I’m most proud of! I never imagined having ‘Engineer’ in my title, so I’m motivated to continue pushing myself to see what I can do next.\n\n\n### [Juliet Wanjohi](/company/team/#jwanjohi) - [Security Engineer, Security Automation](https://handbook.gitlab.com/job-families/security/security-automation/#security-engineer-automation-intermediate)\nJoined GitLab May 2020 / Connect with Juliet on [LinkedIn](https://www.linkedin.com/in/juliet-wanjohi/) and [Twitter](https://twitter.com/jay_wanjohi)\n\n![Your image alt text](https://about.gitlab.com/images/blogimages/security-career-advice/jwanjohl_blog3.png){: .shadow.small.left.wrap-text}\n\n**What is the most interesting thing you’ve learned about security thus far?**\nSecurity is a team effort and a shared responsibility. 
We are now connected more than ever before, therefore we need to approach security with a proactive mindset, starting individually by making sure that each one of us is taking the necessary precautions and following best practices to avoid risk. At an organizational level, no security team carries the entire burden of security alone and this is quite evident here at GitLab where we collaboratively work across our respective security teams to enhance the overall security posture of the company and the product. As we build in the necessary tooling and processes to be secure, we must remember that security is a never-ending journey, not a destination!\n\n**What advice would you give to someone just starting out in the security and tech industry?**\nThere’s no shortage of problems to be solved in the security industry. Every day there’s a new type of cyber threat and with this, comes along the creation of new and innovative career opportunities to solve these problems. In order to find your place in this cog wheel, you need to be curious and willing to explore the different options within the field and see what interests you the most. The next step is to be proactive in acquainting yourself with this area and start to pick up the necessary knowledge and skills to make you an industry expert. Surround yourself with other security professionals who can contribute positively towards your career growth. It’s also important that you work towards being a T-shaped individual where you have deep expertise in your chosen area of interest but also a breadth of knowledge in other areas in the security field.\n\n\n### [Liz Coleman](/company/team/#lcoleman) - [Sr. 
Security Compliance Engineer](https://handbook.gitlab.com/job-families/security/security-compliance/#senior-security-compliance-engineer)\nJoined GitLab January 2020 / Connect with Liz on [LinkedIn](https://www.linkedin.com/in/elizabeth-coleman-5779418b/)\n\n![Your image alt text](https://about.gitlab.com/images/blogimages/security-career-advice/lcoleman_blog3.png){: .shadow.small.right.wrap-text}\n\n**What is the most interesting thing you’ve learned about security thus far?**\nOne of the most interesting things I’ve learned is that security is an all-inclusive team sport. There are so many layers to security from individuals, general governance, information system security, IT security and the list goes on. Each layer consists of networks of people and processes, all of which have some type of underlying security theme. Security is a consideration and holds a level of importance to everyone and every role in an organization, but in slightly different ways. The great thing about this is that security can be a commonality that can bring people together and be leveraged across all layers of an organization. Its strength lies in its ability to be all-inclusive and everyone’s invited to play the game.\n\n**What advice would you give to someone just starting out in the security or tech industry?**\nJust starting out in the security or tech industry can be intimidating. There are so many certifications and paths available that it can be hard to find a place to start. One thing I found very helpful when starting out was to research where I wanted to go. I turned to my colleagues, professional network and leadership and looked at the certifications and education they had. Linkedin is an open book of information that outlines individual accomplishments. I saw my manager at the time had a [Certified Information System Auditor](https://www.isaca.org/credentialing/cisa) certification. So I started there. 
Then came the [Certified Information System Security Professional certification](https://www.isc2.org/Certifications/CISSP) which I found to be a common certification held by individuals who had similar professional and career interests. Each certification takes time, effort and costs money to obtain, so being strategic is key. Investigate your options and identify a path based on your interests. See what other professionals have from a knowledge or certification standpoint and go for it!\n\n\n### [Meghan Maneval](/company/team/#mmaneval20) - Manager, Risk and Field Security\nJoined GitLab July 2020 / Connect with Meghan on [LinkedIn](https://www.linkedin.com/in/meghanmaneval/)\n\n![Your image alt text](https://about.gitlab.com/images/blogimages/security-career-advice/mmaneval_blog3.png){: .shadow.small.left.wrap-text}\n\n**What is the most interesting thing you’ve learned about security and technology thus far?**\nI remember when I was just graduating from college and applying for jobs in technology, thinking I was going to come in and be the hot-shot young intern who would make a huge impact. What actually happened was I realized just how little I really knew about technology and security in the real world! What I’ve learned over the years is that it doesn’t matter how much you know about technology or security in general or from a textbook, what matters is how your company applies those concepts. Security controls and methodologies can be applied in millions of different ways! I love meeting with our customers and third parties and seeing all the unique ways they apply and utilize security and technology principles.\n\n**What's a difficult situation you've had to overcome, professionally?**\nIf you ask my kids they will roll their eyes and tell you that my motto in life is “you learn more from the bad stuff than the good.” And I believe that is true in most situations. 
I’ve found myself in a few bad situations throughout my career and truly believe I have come out of it a better person. In a prior role, as an auditor, I had identified potentially fraudulent activity within the organization’s Human Resources department. When I reported the information to the auditee in my draft report, she decided to go to the organization’s board and have me removed from my position. While I knew that I had done the right thing, it crushed me and made me rethink my desire to stay in the compliance field. However, after taking time to reflect I realized that going through this actually made me a better auditor, a better compliance specialist, and a better employee. I also realized at that point that I wanted to focus less on organizational risk and more on security. I took a job as an auditor for a software company and my career has blossomed since. So always remember- you learn more from the bad stuff than the good and staying true to your values and instincts will ultimately keep you on the right path!\n\n\n### [Mitra Jozenazemian](/company/team/#mjozenazemian) - Senior Security Engineer, [Security Incident Response Team](https://handbook.gitlab.com/job-families/security/security-engineer/#sirt---security-incident-response-team)\nJoined GitLab July 2020 / Connect with Mitra on [LinkedIn](https://www.linkedin.com/in/mitra-jozenazemian-0a05233b)\n\n![Your image alt text](https://about.gitlab.com/images/blogimages/security-career-advice/mjozenazemian_blog3.png){: .shadow.small.right.wrap-text}\n\n**What excites you about working in security?**\nI love challenges and being challenged. We all know there is no network that is 100% secure and it is a matter of time, money and effort for an attacker to be able to gain access to almost any network. So, the challenges in security are ever-present. 
As a security engineer, you constantly need to think about how to prevent attackers from being able to access your network and if they *are* able to get in; how you can detect and stop them, as quickly as possible.\n\n**What do you wish you had known at the start of your career that you know now?**\nReally, I wish I knew all the things I know today back then. Wait...is that not possible?\n\nOk, if I have to choose *just* one thing I would say: I wish, at any given time in my career, that I would have all the answers to how the security team should best collaborate with colleagues in other teams so that they feel security is there to enable and protect their work, not stop them from doing their job.\n\n\n### [Rupal Shah](/company/team/#rcshah) - [Security Compliance Engineer](https://handbook.gitlab.com/job-families/security/security-compliance/#security-compliance-engineer-intermediate)\nJoined GitLab October 2020 / Connect with Rupal on [LinkedIn](https://www.linkedin.com/in/rupal-shah-57a384/)\n\n![Your image alt text](https://about.gitlab.com/images/blogimages/security-career-advice/rshah_blog3.png){: .shadow.small.left.wrap-text}\n\n**What excites you about working in security?**\nSecurity is ever-changing and impacts everyone! Changes in one area of the business can quickly impact another area and so, everyone must work together to maintain security. This allows me to constantly be learning about other parts of the business that I might not regularly get to be involved with. Things can change at the blink of an eye, but I always feel challenged to keep learning and never have the feeling of being bored.\n\n**Was there ever a role you applied for and landed, but weren't 100% qualified to do?**\nI feel that way about every role I have ever had. Including this one..haha.  However, I think that’s a good thing, otherwise I’d get bored, be unmotivated and leave too soon! 
In a previous role, I had no background in security, but my manager saw something special in me, took a chance on me and that changed my world. I’m so happy that opportunity came into my life. I think the most important thing is to feel confident in yourself, knowing others already feel that way about you and see something special in you.  As long as you keep a positive, can-do attitude, you can achieve anything you set your mind to.  Just remember, you have to start somewhere and what better time than now!  Anytime I feel unsure, I remember how far along I have come and know how much more I have to learn and keep a positive attitude.\n\n\n### [Heather Simpson](/company/team/#heather) - [Senior External Communications Analyst](https://handbook.gitlab.com/job-families/security/security-analyst/#external-communications)\nJoined GitLab February 2019 / Connect with Heather on [LinkedIn](https://www.linkedin.com/in/heathersimpson700/) and [Twitter](https://twitter.com/heatherswall)\n\n![Your image alt text](https://about.gitlab.com/images/blogimages/security-career-advice/hsimpson_blog3.png){: .shadow.small.right.wrap-text}\n\n**What excites you about working in security or tech?**\nThe nature of the beast that is tech, is that it's ever-changing and evolving. Meaning you’ve got to continually learn new tools, sharpen your skills and freshen your approach to problems.  As a marketing communications professional in the industry this means I need to continue throwing myself into new concepts and tools and pushing myself out of my comfort zone. This has meant that I’ve gotten comfortable with “trying things to see if they’ll work” and holding my breath as I type commands into my terminal 🤣;  knowing that my Google skills are just as good as the next person’s.🤷‍♀️ Thankfully, being a “connector of dots”, as many in marketing are, means I work across the organization and know who will graciously help me dig myself out of a “command gone wrong”.  
Working in Tech means I’ll never know all the things (and won’t ever get close) so I’ll always have challenges to overcome and new things to learn; and that’s what keeps me going.\n\n\n**Was there ever a role you applied for and landed, but weren't 100% qualified to do?**\nYes, almost all of them. This one included. When I’d applied to GitLab I’d worked in tech for over 10 years, but had almost no experience in devops, and little experience in security (I spent 2 years as a portfolio marketing manager for a large enterprise tech integrator). However, I’m really motivated by new challenges, LOVE building new programs and have a can-do attitude. I think these are common traits of many team members here at GitLab and my hiring manager at the time saw this in me.\n\nEarly in my career, I moved into a new job and only stayed there for 2 months. I knew within the first week I’d be bored out of my mind because I wasn’t challenged.  I’m not proud of having taken a job only to stay for a few months, but this goes to show that, it's better to have a role where you have to “grow into it” than one where you’ve already been there, done that.  For me, the recipe for success in almost any role or project is a combination of “believe (in you/your skills/your expertise) and achieve”, mixed with heaps of research, planning and doing. Believe that you’ve got the “stuff” to get the job done, figure out the best way to do it and then knock it out of the park! 🚀\n\n## Interested in a career in security or tech? We're hiring!\nYou can check out the [career opportunities page](/jobs/). Don't meet 100% of the qualifications for one of the roles listed there? Still share your information with us! We're hiring within our Security department (and beyond) and looking for unique backgrounds and expertise. 
You can also learn more about GitLab’s [culture](/company/culture/) and [values](https://handbook.gitlab.com/handbook/values/) in order to get an understanding of what it might be like to work here!\n\n\nCover image by [#WOCinTech Chat](https://www.wocintechchat.com/).\n{: .note}\n",[720,9],{"slug":1481,"featured":6,"template":680},"considering-a-career-in-security","content:en-us:blog:considering-a-career-in-security.yml","Considering A Career In Security","en-us/blog/considering-a-career-in-security.yml","en-us/blog/considering-a-career-in-security",{"_path":1487,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1488,"content":1493,"config":1498,"_id":1500,"_type":14,"title":1501,"_source":16,"_file":1502,"_stem":1503,"_extension":19},"/en-us/blog/contribute-wrap-up",{"title":1489,"description":1490,"ogTitle":1489,"ogDescription":1490,"noIndex":6,"ogImage":1262,"ogUrl":1491,"ogSiteName":667,"ogType":668,"canonicalUrls":1491,"schema":1492},"What we learned at Contribute 2019","Community is everything, all remote makes contribution possible, CMO Todd Barr plays a mean trumpet, and more takeaways from Contribute 2019.","https://about.gitlab.com/blog/contribute-wrap-up","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What we learned at Contribute 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"},{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-06-04\",\n      }",{"title":1489,"description":1490,"authors":1494,"heroImage":1262,"date":1495,"body":1496,"category":808,"tags":1497},[869,672],"2019-06-04","\n\n“Community is the best part of GitLab.”\n\nThat message, from the [keynote presentation](https://youtu.be/kDfHy7cv96M) during [Contribute 2019 in New Orleans](/events/gitlab-contribute/), sums up the spirit of GitLab’s seventh all-company gathering. 
Sure CMO (and MC) [Todd Barr](/company/team/#tbarr) added his trumpet to a NOLA classic, \"When the Saints Go Marching In,\" while others shared potentially embarrassing photos and anecdotes from the past. Contribute newbies, who represented more than half of the over 500 attendees, got advice on how to make the most of the unique event, and CEO [Sid Sijbrandij](/company/team/#sytses) made a clear and compelling case for remote work.\n\nBut what stood out most were the ways “community” plays such a vital role at GitLab. “This is our first Contribute,” Sid said. “We changed the name to remind everyone of our mission, that [everyone can contribute](/company/mission/#mission).” In fact, in the product release before Contribute, contributions from the community to GitLab reached an all-time high of 195, Sid said. Because the company is all remote, “everyone can contribute to GitLab on equal footing.”\n\nIn the spirit of community contributions, we asked GitLabbers to share their top takeaways, advice, and feel-good moments from Contribute.\n\n## Your best self\n\n“I’m so pumped for where GitLab is heading,” said strategic account leader [Adam Olson](/company/team/#adamsolson). “Contribute has inspired me to be a better GitLabber. (I want to) win more customers while learning more from others.”\n\n## Network like it matters\n\n[Heather Simpson](/company/team/#Heatherswall), senior external communications analyst, got more out of Contribute than expected. 
“I think because the main focus of Contribute was to spend time getting to know our team members and having fun, the quantity and **quality** of connections I made far exceeded any I'd made at networking or \"team building\" conferences I'd attended with companies in the past.”\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Our \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> CEO put forth a challenge to make our product better while we’re down in \u003Ca href=\"https://twitter.com/hashtag/NOLA?src=hash&amp;ref_src=twsrc%5Etfw\">#NOLA\u003C/a> at \u003Ca href=\"https://twitter.com/hashtag/GitLabContribute?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabContribute\u003C/a> and teams got to work and made several iterative improvements, so \u003Ca href=\"https://twitter.com/sytses?ref_src=twsrc%5Etfw\">@sytses\u003C/a> made good on his word and donned this amazing costume (his wife too!) So good. \u003Ca href=\"https://t.co/8nfQCt0NV0\">pic.twitter.com/8nfQCt0NV0\u003C/a>\u003C/p>&mdash; Heather Simpson 🌈🍃 (@heatherswall) \u003Ca href=\"https://twitter.com/heatherswall/status/1128129734934704129?ref_src=twsrc%5Etfw\">May 14, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## Spread the wealth\n\n“This was most definitely the best Contribute ever,” said GitLab's unofficial bacon ambassador [Richard “Reb” Baum](/company/team/#therebbie) (who is also a solutions architect). “Focusing on building relationships allowed us to spread the culture and feel of the company to the large number of new people who have joined since the previous event. 
As an all-remote company, this is critical to our ongoing success.”\n\n## Continued inspiration\n\n“As an early employee here at GitLab and my sixth [Summit](/company/culture/contribute/previous/), I have never felt more inspired after this week in New Orleans,” said [Philip Camillo](/company/team/#pmanjr311), enterprise account executive. “Working remotely, it’s hard to contextualize hiring 10-12 people a week, and it only hit home when I first walked into the opening keynote. Seeing over 500 people in the main room simply left me speechless.\n\n“Leaving Contribute, I’m inspired by all the team members who received awards and all the people who have helped build the product over the years, as well as new team members making an impact immediately by just jumping in.\n\n“Imagine what we will create if we all work towards generating as much value as possible and making everyone around us inspired. Meeting everyone this last week also made me realize that people see you, and the hard work doesn’t go unnoticed. 
Working remotely, it can be a bit more difficult to see the direct impact you’re making, and the personal brand you’re building with your colleagues.”\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">A full house of GitLabbers celebrating and gathering around the notion that Everyone Can Contribute \u003Ca href=\"https://twitter.com/hashtag/GitLabContribute?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabContribute\u003C/a> \u003Ca href=\"https://t.co/dWHiSPZGtV\">pic.twitter.com/dWHiSPZGtV\u003C/a>\u003C/p>&mdash; John Northrup (@northrup) \u003Ca href=\"https://twitter.com/northrup/status/1126498724518268929?ref_src=twsrc%5Etfw\">May 9, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## Decisions at the speed of light\n\n“I took a lot of notes about the keynote but the thing that really stuck out to me was how Sid emphasized speed of decision making,” said [Emilie Schario](https://gitlab.com/emilie), data engineer, analytics. “That was really a lightbulb moment for me.”\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">My awesome teammate \u003Ca href=\"https://twitter.com/rspaik?ref_src=twsrc%5Etfw\">@rspaik\u003C/a> kicking off day two. Amazing stat he shared: 13.5% of merged MRs to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> come from our community. 
\u003Ca href=\"https://twitter.com/hashtag/GitLabContribute?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabContribute\u003C/a> \u003Ca href=\"https://t.co/1CUcyFF70y\">pic.twitter.com/1CUcyFF70y\u003C/a>\u003C/p>&mdash; John Coghlan (@john_cogs) \u003Ca href=\"https://twitter.com/john_cogs/status/1126853746754039809?ref_src=twsrc%5Etfw\">May 10, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nYou can check out the rest of the highlights from Contribute below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xdtPNXtkBhE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo directed and produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n",[267,677,9,832],{"slug":1499,"featured":6,"template":680},"contribute-wrap-up","content:en-us:blog:contribute-wrap-up.yml","Contribute Wrap Up","en-us/blog/contribute-wrap-up.yml","en-us/blog/contribute-wrap-up",{"_path":1505,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1506,"content":1512,"config":1519,"_id":1521,"_type":14,"title":1522,"_source":16,"_file":1523,"_stem":1524,"_extension":19},"/en-us/blog/contributing-to-gitlab-with-ease",{"title":1507,"description":1508,"ogTitle":1507,"ogDescription":1508,"noIndex":6,"ogImage":1509,"ogUrl":1510,"ogSiteName":667,"ogType":668,"canonicalUrls":1510,"schema":1511},"Contributing to GitLab with ease","Everyone can contribute to GitLab, so here are a few tips to make your experience easy and pleasant.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678997/Blog/Hero%20Images/mergerequestsgame.jpg","https://about.gitlab.com/blog/contributing-to-gitlab-with-ease","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributing to GitLab with 
ease\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lin Jen-Shin\"}],\n        \"datePublished\": \"2018-08-23\",\n      }",{"title":1507,"description":1508,"authors":1513,"heroImage":1509,"date":1515,"body":1516,"category":1517,"tags":1518},[1514],"Lin Jen-Shin","2018-08-23","\nAs a [Merge Request Coach](https://handbook.gitlab.com/job-families/expert/merge-request-coach/), I am happy to\nhelp community contributors feel comfortable when contributing\nto GitLab. During my time reviewing merge requests, I’ve learned a bit about\nhow it feels contributing to GitLab as a newcomer, and I’d like to share\nmy learnings with you.\n\n## Common issues in an MR (merge request)\n\nIn the past, I think styling might have been one of the most common issues.\nHowever, we’re improving our CI to run more static analysis, so these issues\nare now automatically pointed out. Today, contributors can easily see what\ndidn’t pass CI, and they can fix the issues very quickly, so this is not as\ncommon as it was in the past.\n\nThe biggest issue today might be that many contributors don’t add tests, since\ntests often require much more effort than fixing or adding something. If\nyou’re struggling with adding tests, please don’t worry. Merge request coaches\ncan tell you how to add tests when we see your contribution, and we’ll work\nthrough it together.\n\n## Best practices\n\n1. If you only remember one best practice, I hope it is to keep this\nreference handy when [contributing to GitLab](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/development/contributing/index.md).\nI know it’s super long, but it has all the information you need when it comes\nto making contributions to GitLab.\n\n2. Get [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit) set up\nlocally if you haven’t already. Running tests locally is the best way to\ndevelop and debug, and I highly encourage that you incorporate this into your\nworkflow.\n\n3. Don’t ignore CI. 
If your pipeline didn’t pass, it’s important to go back and\nidentify the problem. Troubleshooting issues is a great way to practice your\nskills and help you learn from mistakes.\n\n4. Look at the [GitLab team page](/company/team/) and pick a merge request coach to\nping if you need help. Merge request coaches guide contributors and will even\njump in to help finish an MR if a contributor can no longer work on it,\nensuring that the attribution stays with the original contributor. Our goal is\nto help everyone feel comfortable and empowered to contribute even with\nsmallest possible effort. Coaches have other responsibilities and don’t always\nproactively look for contributors who need help, so ping them if you’re stuck\nor ready for a review. If they’re not the right person to ping, they’ll pass\nyou over to the right one. We love helping community contributors, and we look\nforward to guiding and working with you.\n\n## Little-known features\n\nWe [recently welcomed](/blog/introducing-gitlab-s-integrated-development-environment/)\nWeb IDE to quickly edit multiple files on the web directly without cloning\nthe whole repository. Web IDE is useful if you just want to make some small\nchanges online. If you’d like to learn more about Web IDE, please\nhead over to our [documentation](https://docs.gitlab.com/ee/user/project/web_ide/).\n\nSince GitLab's development velocity is pretty high, sometimes conflicts can\nhappen very frequently. Did you know that you can resolve conflicts directly\nfrom the web UI? I really love this feature, because it’s very easy to resolve\nsimple conflicts, and I don’t need to launch my editor or Git to pull, merge,\nand push. With some simple clicks, I can save a lot of time for simple\nconflicts.\n\n## What everyone should know about MRs\n\nTo me, an MR is a tool to interactively develop and explore with other people.\nDon’t worry about being perfect in the first version of your MR. 
We learn\nthrough our mistakes and get better over time.\n\nIf you’ve made tons of contributions, we invite you to join our\n[core team](/community/core-team/) or apply for a [full-time position](/jobs/) at GitLab.\nThe MR is one of the most important ways we work together, and we’d love to\ncollaborate with you.\n\n## What to do if you’re struggling\n\nIf you’re having some trouble getting the hang of merge requests, I suggest\ntaking a look at how others work on the MRs. Following other people’s example\ncan help you understand what they did and why they did it. Reaching out to a\nmerge request coach, joining discussions, and reviewing others’ code are also\nways to help you get up to speed. I think that interacting with others is a\ngreat way to learn and improve.\n\n## We’d love your contributions!\n\nWe really enjoy collaborating with community contributors, and we look forward\nto working together. If you don't know what you can contribute, please take a\nlook at [`Accepting merge requests`](https://gitlab.com/gitlab-org/gitlab-ce/issues?label_name[]=Accepting+merge+requests).\nWe label some issues to explicitly call out the ones that we won’t schedule\nanytime soon, but we still want it. These issues usually have very clear scopes,\nso they often just require a simple implementation. They’re nice targets if\nyou don’t know what to contribute but want to gain experience.\n\nIf you would like to see how we handle community contributions, please take a\nlook at [`Community contribution`](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests?label_name[]=Community%20contribution).\nWe put this label on all community contributions, therefore you can easily\nfind all the past and current community contributions. 
We look forward to\nyour future contributions as well!\n\n[Cover image](https://unsplash.com/photos/vqDAUejnwKw) by\n[Victor Freitas](https://unsplash.com/@victorfreitas), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n","open-source",[267,811,993,9,745],{"slug":1520,"featured":6,"template":680},"contributing-to-gitlab-with-ease","content:en-us:blog:contributing-to-gitlab-with-ease.yml","Contributing To Gitlab With Ease","en-us/blog/contributing-to-gitlab-with-ease.yml","en-us/blog/contributing-to-gitlab-with-ease",{"_path":1526,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1527,"content":1533,"config":1538,"_id":1540,"_type":14,"title":1541,"_source":16,"_file":1542,"_stem":1543,"_extension":19},"/en-us/blog/coreos-acquisition",{"title":1528,"description":1529,"ogTitle":1528,"ogDescription":1529,"noIndex":6,"ogImage":1530,"ogUrl":1531,"ogSiteName":667,"ogType":668,"canonicalUrls":1531,"schema":1532},"Red Hat follows GitLab's lead in hybrid cloud technology","Red Hat’s recent acquisition of CoreOS proves that GitLab’s hybrid cloud strategy is worth the investment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680202/Blog/Hero%20Images/coreos.jpg","https://about.gitlab.com/blog/coreos-acquisition","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Red Hat follows GitLab's lead in hybrid cloud technology\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2018-02-27\",\n      }",{"title":1528,"description":1529,"authors":1534,"heroImage":1530,"date":1535,"body":1536,"category":787,"tags":1537},[762],"2018-02-27","\n\nRed Hat's acquisition of CoreOS illustrates the growing importance of adopting a hybrid cloud strategy.\n\n\u003C!-- more -->\n\nIn a market-validating move, [Red Hat acquired 
CoreOS](https://www.redhat.com/en/about/press-releases/red-hat-acquire-coreos-expanding-its-kubernetes-and-containers-leadership), a player in the container technology space, for $250 million. The acquisition comes at a pivotal time in the hybrid cloud market as containers are increasingly becoming a necessity in enabling application portability across multiple clouds. The portability of containers has heightened demand for container management solutions, with organizations actively seeking to find solutions to help them transition their existing applications to the hybrid cloud.\n\nThe acquisition has broad implications on the market and adds validation to [our mission](/company/strategy/) to develop the leading end-to-end software development and operations tool for [cloud native development](/topics/cloud-native/).\n\n## A future-focused strategy\n\nHybrid cloud is the future of technology, and every organization should make its adoption a business imperative. Hybrid cloud gives organizations the flexibility to begin with in-house data centers, scale up with external cloud resources, and adopt or revert to solutions based on changing needs. Hybrid cloud is a customizable strategy that won’t restrict your development and operations and gives you the freedom to leverage existing, low-cost cloud solutions when you need them.\n\nWith the trajectory of software innovation and a customer-driven demand for a simplified solution, cloud native development is the next step in digital transformation. 
Hybrid cloud technology uses a combination of both physical and multiple cloud platforms, such as Amazon and Azure, increasing the need for a single way to enable faster development velocity while maintaining operational stability.\n\nBecause a hybrid cloud strategy allows developers to quickly adjust development and operations based on need, developers can focus on code improvements and new features, rather than turning their attention to brainstorming ways to scale.\n\n## On cloud nine\n\nContainer technology enables you to simplify deployment of runners, review apps, and your own applications on multiple clouds, including AWS, Azure, and Google, providing you with multiple advantages in development. The ability to switch easily between different clouds gives you the freedom to select options based on price and to make adjustments as costs change over a development lifecycle. If you decide to run an in-house data center and suddenly need to scale beyond your existing hardware, you can quickly leverage the public cloud using the same technology.\n\nContainer schedulers, such as [Kubernetes](/solutions/kubernetes/), provide a common platform from which to automate your management of application containers, from deploying and scaling to operating, so getting started with a hybrid cloud strategy can be a breeze if you have the right solution.\n\n## GitLab has you covered\n\nGitLab is the leader in cloud native development and has pioneered everything you need for end-to-end software development and operations. We have developed a compelling product that covers the entire DevOps lifecycle with a [single application](/direction/#single-application) based on [convention over configuration](/handbook/product/product-principles/#convention-over-configuration). 
With a [built-in container registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html), Kubernetes integration, and [CI/CD](/features/continuous-integration/), GitLab is a complete, easy-to-implement solution for your cloud strategy. GitLab is the first end-to-end application to meet the needs of developers at all stages of the development and operations lifecycle.\n\nAs a new generation of software emerges, GitLab has set the standard in providing you with the tools to build, test, deploy, and run your app at scale. A hybrid cloud strategy is no longer a unique way to gain a competitive advantage. It’s the only way to ensure visibility, security, and stability across multiple environments.\n\n[Cover image](https://pixabay.com/en/business-cargo-containers-crate-1845350/) licensed\nunder [CC X](https://pixabay.com/en/service/terms/#usage)\n{: .note}\n",[675,9],{"slug":1539,"featured":6,"template":680},"coreos-acquisition","content:en-us:blog:coreos-acquisition.yml","Coreos Acquisition","en-us/blog/coreos-acquisition.yml","en-us/blog/coreos-acquisition",{"_path":1545,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1546,"content":1552,"config":1558,"_id":1560,"_type":14,"title":1561,"_source":16,"_file":1562,"_stem":1563,"_extension":19},"/en-us/blog/create-vision",{"title":1547,"description":1548,"ogTitle":1547,"ogDescription":1548,"noIndex":6,"ogImage":1549,"ogUrl":1550,"ogSiteName":667,"ogType":668,"canonicalUrls":1550,"schema":1551},"GitLab's 2019 product vision for DevOps Create","Take an early look at where collaboration, merge requests, and the Web IDE are heading in 2019.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678812/Blog/Hero%20Images/web-ide-cover.jpg","https://about.gitlab.com/blog/create-vision","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 2019 product vision for DevOps Create\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"James Ramsay\"}],\n        \"datePublished\": \"2018-09-21\",\n      }",{"title":1547,"description":1548,"authors":1553,"heroImage":1549,"date":1555,"body":1556,"category":299,"tags":1557},[1554],"James Ramsay","2018-09-21","\nGitLab is a single application, so for convenience we organize by [DevOps stages](/handbook/product/categories/). The Create stage of the DevOps lifecycle is about creating code, and includes Git repositories, merge requests, code review, the Web IDE, wikis, and snippets.\n\nManaging source code is at the heart of GitLab – it's in our name and it powers your applications. This year we've shipped many important improvements to make it easier to go from idea to production. The [Web IDE](/releases/2018/06/22/gitlab-11-0-released/#cicd-pipeline-status-and-job-traces-in-the-web-ide) makes it easy for anyone to contribute, and faster to work with merge requests. [Squash and Merge](/releases/2018/06/22/gitlab-11-0-released/#squash-and-merge-in-gitlab-core-and-gitlabcom-free), and [Rebase and Fast-forward Merge](/releases/2018/01/22/gitlab-10-4-released/#rebase-and-fast-forward-in-ce) are available in GitLab CE. [File locking](/releases/2018/02/22/gitlab-10-5-released/#git-lfs-2-locking-support) is integrated with Git LFS. [Maintainers can push to forks](/releases/2018/03/22/gitlab-10-6-released/#maintainers-can-push-to-mr-from-fork). 
And there is much more to come this year, like [batch comments](https://gitlab.com/gitlab-org/gitlab-ee/issues/1984) for merge requests, and [suggested approvers](https://gitlab.com/gitlab-org/gitlab-ee/issues/5382) based on code owners.\n\nHere are some of the things we're thinking about for 2019:\n\n- [Collaboration](#collaboration)\n- [Code review and approvals](#code-review-and-approvals)\n- [Web IDE](#web-ide)\n- [Summary](#summing-up)\n\nAs our plans are always in draft, we'd love to hear your thoughts, and any suggestions.\n\n### Collaboration\n\nGit's distributed design made new collaborative workflows possible, and forking has made collaboration even easier. Forking is the workflow of choice for open source, and for the same reasons it is also great for private organizations. We want to remove the barriers to collaboration and [inner sourcing](/topics/version-control/what-is-innersource/), but also make it easier to collaborate with external open source projects too.\n\nThe distributed capabilities of Git aren't limited to a single server. Open source software is used extensively in commercial applications of all kinds, but collaboration between open source projects and commercial is difficult. Features and bug fixes to open source projects can sit in stale forks in private Git repositories for lack of tools and process. [Distributed merge requests](https://gitlab.com/groups/gitlab-org/-/epics/260) will make it easy publish a patch from a private GitLab instance to a public upstream server, be it GitLab, GitHub or Bitbucket. Teams will be able to work on a patch privately following internal processes, but instead of merging the reviewed and tested change privately, it can be published to a new public merge request upstream. Contributing fixes and features upstream isn't only good for the community, but it also makes commercial sense by eliminating the costly task of keeping a stale, private fork up to date. 
We want to make it easy for everyone to contribute to open source software, as individuals and as companies!\n\n![Mockup of distributed merge request widget](https://about.gitlab.com/images/blogimages/merge-request-distributed.png){: .medium.center.shadow}\n\nWe'll also be improving simpler forking workflows too with important quality-of-life improvements. To make it easy to see how far behind or diverged your fork is, we will make it possible to [compare branches](https://gitlab.com/gitlab-org/gitlab-ce/issues/19788) across forks and [cherry pick](https://gitlab.com/gitlab-org/gitlab-ce/issues/43568) changes directly from the upstream project into your fork. Forks of private projects will also [inherit permissions](https://gitlab.com/gitlab-org/gitlab-ce/issues/8935) from the upstream project, making it possible for upstream maintainers to rebase stale merge requests and help contributors. This will allow teams to adopt forking workflows without needing to make every project public to the world or to the organization.\n\n### Code review and approvals\n\nMerge requests are key to the workflows that allow teams to iterate rapidly and ship amazing products quickly, by bringing together all the important information in a single place. Critical to this workflow is the code review, and we want GitLab to be the best tool for doing code reviews.\n\nAutomatic code quality and linting tools can prevent code reviews becoming simple code style reviews, but without the inline feedback a reviewer can't be sure which problems have been automatically detected. A new [API for line by line code quality feedback](https://gitlab.com/gitlab-org/gitlab-ce/issues/50299) will allow output from tools to be rendered natively in GitLab in the merge request diff. 
Merge request authors will have a single source of truth, and code reviewers can confidently focus on important structural feedback.\n\nCode review feedback cannot truly be resolved and the merge request approved until the reviewer checks the feedback was correctly addressed. This step prevents feedback from being misunderstood or overlooked, but it is currently difficult and time consuming. We are going to streamline this important step by allowing you to [review changes since code review](https://gitlab.com/groups/gitlab-org/-/epics/314) and making [merge request diffs smarter](https://gitlab.com/groups/gitlab-org/-/epics/340). When the change is straightforward, we're going to make it possible to simply [propose a change](https://gitlab.com/gitlab-org/gitlab-ce/issues/18008) as easily as leaving a comment that can be applied with a single click – no more copying and pasting `sed` one liners! And we're going to make it easier to [view and add comments to commits](https://gitlab.com/gitlab-org/gitlab-ee/issues/1769) at any time.\n\nIn the real world, complex features often require large, complex merge requests. We will support these situations better with [commit by commit code review](https://gitlab.com/groups/gitlab-org/-/epics/285), autosquashing [`fixup!`](https://gitlab.com/gitlab-org/gitlab-ee/issues/212) and [`squash!`](https://gitlab.com/gitlab-org/gitlab-ce/issues/50400) commits, and allowing you to [preview](https://gitlab.com/gitlab-org/gitlab-ee/issues/7259) the resultant squashed commits.\n\nComplex real-world changes also need good commit messages, but commit messages are too easily neglected. Without good commit messages, debugging a regression, or modifying an important existing function is painful and error prone. 
To help teams adopt best practice [commit hygiene](/blog/keeping-git-commit-history-clean/), we will make [commit messages part of code review](https://gitlab.com/groups/gitlab-org/-/epics/286) by allowing comments on commit messages, improving the [visibility of commit messages](https://gitlab.com/gitlab-org/gitlab-ce/issues/49803), and making [squash and merge smarter](https://gitlab.com/gitlab-org/gitlab-ce/issues/47149). GitLab should celebrate great commit messages and amplify their benefits to make it easier for teams to adopt best practices.\n\n### Web IDE\n\nIn 2018 we're building a strong foundation for a cloud development environment with [client side evaluation](https://gitlab.com/gitlab-org/gitlab-ce/issues/47268) and [server side evaluation](https://gitlab.com/gitlab-org/gitlab-ee/issues/4013) powered live previews, and server side evaluation will also enable a [web terminal](https://gitlab.com/gitlab-org/gitlab-ee/issues/5426) to test your changes in real time. IDEs are also very personal and should support customization, to make it easy to move between your local IDE and GitLab IDE. Please share your feedback, and consider contributing – I'd love to see support for [dark syntax themes](https://gitlab.com/gitlab-org/gitlab-ce/issues/46334) and [vim keybindings](https://gitlab.com/gitlab-org/gitlab-ce/issues/47930)!\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/sSWu6TyubTE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe Web IDE makes it easier than ever to resolve code review feedback, reducing the need to switch context in your local development environment, but we can make it even better. Addressing a comprehensive code review still requires switching backwards and forwards between the merge request and the Web IDE. 
[Line by line code quality feedback](https://gitlab.com/gitlab-org/gitlab-ce/issues/50299) available in the merge request diff will also be available in the Web IDE as will [live linting feedback](https://gitlab.com/groups/gitlab-org/-/epics/70) powered by server side evaluation so to help prevent new code styling problems being created while resolving feedback.\n\nWe are also considering integrating [merge request discussions](https://gitlab.com/groups/gitlab-org/-/epics/72) so that code review comments can be addressed without needing to continually switch between tabs. We don't think the Web IDE should replace the merge request, nor should every feature be duplicated into it, but do think the Web IDE can further simplify the process for resolving code review feedback so teams can iterate faster.\n\n### Summing up\n\nWriting, reviewing, and merging code is where the rubber hits the road when taking your app from idea to production, and in 2019 we want it to be better than ever before!\n\nThe [GitLab product vision](/direction/) is public so you can read up on what we're thinking about at any time, about every part of the product. Please join the conversation and share your feedback on these ideas, and offer ideas of your own! 
Your contributions – idea or code – are welcomed and appreciated so that we can all work together to make GitLab the best application to build and ship your next great idea.\n",[9,677,811,723,1440],{"slug":1559,"featured":6,"template":680},"create-vision","content:en-us:blog:create-vision.yml","Create Vision","en-us/blog/create-vision.yml","en-us/blog/create-vision",{"_path":1565,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1566,"content":1572,"config":1579,"_id":1581,"_type":14,"title":1582,"_source":16,"_file":1583,"_stem":1584,"_extension":19},"/en-us/blog/creating-a-threat-model-that-works-for-gitlab",{"title":1567,"description":1568,"ogTitle":1567,"ogDescription":1568,"noIndex":6,"ogImage":1569,"ogUrl":1570,"ogSiteName":667,"ogType":668,"canonicalUrls":1570,"schema":1571},"How we’re creating a threat model framework that works for GitLab","As usual, we’re creating our own path in how we handle our threat modeling, approaching development both iteratively and collaboratively, and seriously shifting left with our framework and processes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682058/Blog/Hero%20Images/pexels-nathan-j-hilton.jpg","https://about.gitlab.com/blog/creating-a-threat-model-that-works-for-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we’re creating a threat model framework that works for GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2021-07-09\",\n      }",{"title":1567,"description":1568,"authors":1573,"heroImage":1569,"date":1575,"body":1576,"category":720,"tags":1577},[1574],"Mark Loveless","2021-07-09","\n\nThis is the first in a series of three blog posts where we discuss [threat modeling](/handbook/security/threat_modeling/) and how we’re [using it at GitLab](/handbook/security/threat_modeling/howto.html) to help secure our product, our company, and 
most importantly our customer’s data. As usual, [we’re doing things a bit differently](https://gitlab.com/gitlab-com/gl-security/security-research/threat-modeling-template), but when you hear why it will make a lot of sense.\n\n## Threat modeling\n\nLet’s start with the basics, what is threat modeling?\n\nThreat modeling is the process of risk assessment for a particular project, asset, procedure, or product. While it can apply to nearly any established or new procedure, it seems to most often get applied to software. For GitLab, this would mainly apply to our source code.\n\nAs assessing risk has historically been the domain of the security department of most organizations, the threat modeling process has been nearly exclusively handled by the [security department](/handbook/security/#security-department) here at GitLab. This does make a lot of sense on many levels, and many threat modeling scenarios are exclusively managed by those within the security department.\n\n## How does it work? In theory and in practice?\n\nThe general process of developing a threat model does vary, but it typically breaks down as follows:\n\n* Scope out what is to be included in the threat model process.\n* Define the potential attackers or situations that could create a security problem.\n* Assess the associated risks with the process or procedure.\n* Fix all the problems identified.\n\nThis sounds fine, but there are a few things that cause problems for a lot of organizations, especially bleeding edge companies that push boundaries. Here are a few:\n* In spite of the attempts to “shift left” it is often that most security departments look at the new code or new project towards the end of the project. 
In lucky cases, they are involved in the middle; but ideally they should be included in the beginning phases.\n* In large organizations with many projects, there are not enough security team members to handle the workload; especially in a shop that is constantly developing and releasing code. Depending on the project, it could take hours to simply get a security team member up to speed, assuming everyone had the free time to spend doing so. Basically, it doesn’t scale as there are simply not enough personnel to get all of the work done.\n* The models used for this are extremely thorough but also extremely complex. They can involve intricate diagrams, require input from multiple parties that may not fully understand what the other parties are doing, and use language to describe their layered steps that can be confusing and, well, quite boring.\n* No one, and I mean **no one** seems to enjoy creating a threat model.\n\n## Finding a framework we could adapt\n\nFirst off, we had to decide on a few things up front. We wanted to come up with some type of framework that allowed us to easily adopt a threat modeling process into our existing processes. Our existing processes work quite well, and we knew that if we were going to introduce something into that process, it would have to be simple.\n\nWe had to address all of the concerns that we had identified as a part of the overall threat model process and either reduce their impact or eliminate them entirely. **The threat modeling had to scale and fit into the existing development processes, not the other way around.**\n\nAsking a group of developers to learn some new process such as the process of creating elaborate diagrams that define data classification, authentication zones, permissions, and many other detailed items just didn’t make any sense. 
Sure, you can get a sense of part of the information being modeled, but does one have to learn some complex diagramming software package in the process?\n\nGitLab is 100% remote and 100% spread out all over the planet, and we manage to work asynchronously. Whatever process for threat modeling we were going to use was going to require the ability to work asynchronously while doing it.\n\nAfter choosing our general framework, we had to strip it down and make it fit with our existing processes, develop a “plan” on how to use it, test it, and then introduce it into the usual steps. This took a bit of time, but we came up with something.\n\n## PASTA as a base\nWe use the [PASTA](https://www.wiley.com/en-us/Risk+Centric+Threat+Modeling%3A+Process+for+Attack+Simulation+and+Threat+Analysis-p-9780470500965#) framework as a base, and with all of the adjustments we’ve made to fit GitLab’s unique environment and processes, we are already seeing positive results from our own framework. Here are some of the features:\n* It is easy to understand.\n* It scales.\n* It enhances DevSecOps with minimal overhead.\n* It is based off of an existing framework with an established track record.\n* It works nicely with existing processes within our Security department.\n* It doesn’t just apply to coding projects; it can apply to any project, including those in Infrastructure, Marketing, Sales, and other departments.\n\nThe advantages of our adoption and modification of the PASTA framework allows us to have a common language with those outside of the weird security world, and other departments within GitLab can also understand it. This well-known framework even allows us to have discussions with partners, customers, and contributors about security and risk and threat and not worry about whether they’ll be able to understand us.\n\nBut the biggest change we’ve made is not “how” but “where” and “who.” While our Security team owns the framework, we don’t “run” it. 
It is run by the people who are running the project. *Let me explain...*\n\nLet’s say we have a department in Engineering that is getting ready to start a new or existing project. They have a list of steps they need to run by the Security team as a part of the procedure they would normally follow. One of those steps is for that Engineering department to perform their *own* threat model. We’re available for questions, but as they know the project far better than we do, they come up with a really good model. The idea is that they will uncover a few gotchas and will fix problems either before or during the coding process. And they do!\n\nThe main tool we have available for this is a [threat modeling process that includes a template](/handbook/security/security-engineering/application-security/runbooks/threat-modeling.html), and they use this to create a markdown file (something everyone at GitLab does all the time) to record the basic steps taken during threat modeling. This way when it is time for the Security team review, which is usually near the end of the project, we can review what they’ve done. Of course there are going to be times when we will still send things back for a fix, but the vast majority of everything is already corrected!\n\nWe not only get through the threat modeling process, but the code being developed is more secure, the time to complete this added process is minimal, and it scales. It is **efficient**. It is **effective**. 
It is the [best kind of boring](https://handbook.gitlab.com/handbook/values/#boring-solutions).\n\n## What's next\nIn the next blog post in this series, we will take a deeper dive into the framework, including how in some cases we can use a “subset” of a full PASTA framework, and how we reached some of the decisions on our “modifications.”\n\nPhoto by [Nathan J Hilton](https://www.pexels.com/@radmondo?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels) on [Pexels](https://www.pexels.com/photo/steel-frame-building-in-modern-style-5261943/?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels)\n\n{: .note}\n",[720,9,1578],"security research",{"slug":1580,"featured":6,"template":680},"creating-a-threat-model-that-works-for-gitlab","content:en-us:blog:creating-a-threat-model-that-works-for-gitlab.yml","Creating A Threat Model That Works For Gitlab","en-us/blog/creating-a-threat-model-that-works-for-gitlab.yml","en-us/blog/creating-a-threat-model-that-works-for-gitlab",{"_path":1586,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1587,"content":1592,"config":1598,"_id":1600,"_type":14,"title":1601,"_source":16,"_file":1602,"_stem":1603,"_extension":19},"/en-us/blog/crucial-conversations",{"title":1588,"description":1589,"ogTitle":1588,"ogDescription":1589,"noIndex":6,"ogImage":690,"ogUrl":1590,"ogSiteName":667,"ogType":668,"canonicalUrls":1590,"schema":1591},"Having crucial conversations on an all-remote team","Exploring crucial conversations and the way they fit into our values here at GitLab.","https://about.gitlab.com/blog/crucial-conversations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Having crucial conversations on an all-remote team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Samantha Lee\"}],\n        \"datePublished\": \"2021-02-18\",\n      
}",{"title":1588,"description":1589,"authors":1593,"heroImage":690,"date":1595,"body":1596,"category":698,"tags":1597},[1594],"Samantha Lee","2021-02-18","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nLast week, I attended the [Crucial Conversations training](https://www.vitalsmarts.com/crucial-conversations-training/). Since joining the GitLab Learning and Development team back in October of 2020, requests for support in having difficult conversations with team members have been a recurring theme from people leaders. I completed this training as the first step in a two-part training that will enable myself and other members of the Learning and Development team to be certified to train the GitLab team in having crucial conversations.\n\nIn this post, I'll outline a few key takeaways from the course, share how crucial conversations look in an all-remote work environment, and explain how crucial conversations connect to our [CREDIT values](https://handbook.gitlab.com/handbook/values/).\n\n### What are crucial conversations?\n\nWhen a conversation turns crucial, emotions and stressors are running high. Crucial conversations can occur any day, at any time, with any person. They can be planned or they can come out of a casual conversation.  \n\nCrucial conversations usually address one of three topics, but it's not abnormal for a crucial conversation to touch multiple topics!\n\n1. **Content**: This could be a crucial conversation about a one-time issue, like a missed deadline, forgotten appointment, or an aggrivating comment. Content conversations address what happened and how to move forward from it.\n\n2. **Pattern**: When topics of content conversations happen time after time, they become a pattern conversation. Crucial conversations to address patterns could be centered around multiple missed responsibilities or repetitive comments that impact a team's ability to work together efficiently. 
At home, maybe your requests to your partner to take their phone calls in another room to keep a quiet workspace have been repeatedly ignored. Or at work, your direct report has missed the end of month reporting deadline for 3 months in a row. It's important to address pattern conversation early to get to the root cause, which is likely a content issue.\n\n**A quick note about pattern conversations:** At the time of writing this blog post, our world has just hit the one-year mark of life during the Covid-19 pandemic. While addressing patterns is important, it's equally as important to treat each other with kindess and understand that pandemic-induced stress might show itself in problematic patterns. All the more reason to have a conversation about it!\n\n3. **Relationship**: Here's when things get sticky. Content and pattern conversations are about the action happening (or not happening). But relationship conversations are about the _people having the conversation_. These crucial conversations could be about a lack of trust or mutual respect in a relationship, differing communication styles, or lack of agreement on a project or plan of action. It's also important to remember that conversations intended to be content or pattern-focused can turn into relationship conversations quickly, especially when the person feels an emotional tie to the work or action being discussed.\n\nUnderstanding what crucial conversations **are** is as important as understanding what crucial conversations **are not**. Crucial conversations are **not** synonymous with conflict. This was one of the first things we addressed in the training and I think it's one of the most important factors. When we enter crucial conversations prepared for conflict, we're already approaching fight or flight. We're ready to defend ourselves, to act in protection mode. 
The goal of crucial conversations is not to fight or protect ourselves, but rather to collaborate on desired results.\n\nTake a second to think about the last time you were part of a crucial conversation - a conversation where you perhaps felt stressed, overwhelmed, or nervous about the topic being discussed. How did your body react? Did your heart rate increase? Did you fall silent? Maybe instead your voice was raised. We each respond to crucial conversations in different ways that detract from the main goal of arriving at a solution that works for all parties.\n\nWe've likely all been part of a crucial conversation in the past, whether it be at work or home. Once we know how to identify these conversations, we can move on to strategies for having them effectively. \n\nAt GitLab, this means having effective, results-driven crucial conversations on Zoom with people from all over the world, which brings its own set of unique challenges.\n\n### Having crucial conversations is hard, and an all-remote team brings its own challenges.\n\nIn an office setting, you might pass by a manager or colleague who asks to discuss a challenge or frustration they're having with your work. Or at home, you might spend time after dinner discussing household responsibilities with your children or roommates. During these crucial conversations, we feed off of body language, tone, and energy in the room to recognize if someone feels [psychologically unsafe](/handbook/leadership/emotional-intelligence/psychological-safety/).\n\nBut on Zoom, when your teammate might be in their home office across town or across the globe, we need to use different cues to [build safety and trust](/handbook/leadership/building-trust/).\n\nSome ways we do that at GitLab include:\n\n1. We meet regularly with our people leaders in [1:1 meetings](/handbook/leadership/1-1/). These regular sessions give space for team members to raise crucial conversations often and address challenges and blockers early.\n1. 
We keep [1:1 agendas](/handbook/leadership/1-1/#the-1-1-agenda) to get a heads up on what will be discussed and to document action items and takeaways from synchronous conversations.\n1. We watch for the [body language cues](/handbook/leadership/crucial-conversations/#having-crucial-conversations-on-an-all-remote-team) that we can see on a video call or in a person's tone of voice. This includes checking if someone turns their camera off mid-call, becomes silent or unresponsive to the conversation, or sounds choked up or angry.\n1. We create intentional [space for pause](/handbook/leadership/crucial-conversations/#having-crucial-conversations-on-an-all-remote-team). There can be a sense of pressure to fill every minute during any conversation. During video or phone conversations, silence might feel more uncomfortable. We ask for and respect requests for a minute to think before responding right away.\n\nThese strategies aren't exclusive to an all-remote team - I'm sure they can have a positive impact on in-person crucial conversations, too! But when working on a remote team, it's important to recognize what's missing from in-person connection and be mindful to make the space as safe as possible.\n\nI've explained what crucial conversations are and how they show up in an all-remote work environment, but most importantly, I need to explain the **why**.\n\n### Why do crucial conversations matter?\n\nCrucial conversations enable our team to live our [CREDIT values](/handbook/leadership/crucial-conversations/#how-crucial-conversations-align-with-gitlab-values). In our [values hierarchy](https://handbook.gitlab.com/handbook/values/#hierarchy), we prioritize results. What I love most about crucial conversations is that they also prioritize results.\n\nHere's an example:\n\nImagine you're an individual contributor at GitLab. 
You're feeling overwhelmed with the number of projects on your plate this quarter.\n\nIf you wanted, you could commit to each project, knowing the deadlines were probably unrealistic. You could show up to work each day feeling stressed and overwhelmed. You might snap one day, saying something out of frustration to your team, and regret the comment later on.\n\nOr, you could decide to address the issue with your manager in your 1:1. You can:\n1. Collect your facts. In this case, it's your list of projects all due in the quarter.\n1. Share your story. Express how the workload feels unattainable and you know you can't complete your best work in the given time frame)\n1. Come to a conclusion together. Perhaps you decide to prioritize projects, breaking each project down into specific tasks and moving long-term priorities to the next quarter.\n\nThis second scenario is completely based on results. This crucial conversation has enabled you to set yourself up for success in completing every project with your highest quality of work. The company benefits from your high-quality output. Your team benefits from having a team member who isn't totally stressed out. You benefit from feeling safe and confident in the work you're doing. Every outcome from the conversation can be traced back to a key result for yourself, your team, and the company.\n\nWith such a focus on results, our GitLab team should be having crucial conversations every day!\n\nI see crucial conversations map back to the rest of our values as well. You can read more about the [alignment of GitLab values to crucial conversations in our handbook](/handbook/leadership/crucial-conversations/#how-crucial-conversations-align-with-gitlab-values).\n\n### Getting started having crucial conversations\n\nIf you've read through this post and want to give crucial conversations a try, here are a few ways to get started:\n\n1. Read our [Crucial Conversations handbook page](/handbook/leadership/crucial-conversations/).\n1. 
Read our [Psychological Safety handbook page](/handbook/leadership/emotional-intelligence/psychological-safety/). Creating safe space to have crucial conversations is essential.\n1. Check out the [Crucial Conversations training](https://www.vitalsmarts.com/crucial-conversations-training/) from VitalSmarts. GitLab team members might consider using our [Growth and Development benefit](https://about.gitlab.com/handbook/total-rewards/benefits/general-and-entity-benefits/#growth-and-development-benefit) to take the training themselves.\n1. Try it out! Practicing crucial conversations is the key to getting better at the skills, so give it a try at work, at home, or even with yourself!\n1. GitLab team members - keep an eye out for internal Crucial Conversations training coming in Q2/Q3 of this year as the Learning and Development team gets certified to deliver the training!\n\n### Looking for more Learning and Development material from GitLab?\n\nIf you want to learn more about what the Learning and Development team at GitLab is up to, check out our [handbook page](/handbook/people-group/learning-and-development/) or read our past newsletters. 
You can also reach us at `learning@gitlab.com`.\n",[832,811,9],{"slug":1599,"featured":6,"template":680},"crucial-conversations","content:en-us:blog:crucial-conversations.yml","Crucial Conversations","en-us/blog/crucial-conversations.yml","en-us/blog/crucial-conversations",{"_path":1605,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1606,"content":1612,"config":1618,"_id":1620,"_type":14,"title":1621,"_source":16,"_file":1622,"_stem":1623,"_extension":19},"/en-us/blog/cs-scavenger-hunt",{"title":1607,"description":1608,"ogTitle":1607,"ogDescription":1608,"noIndex":6,"ogImage":1609,"ogUrl":1610,"ogSiteName":667,"ogType":668,"canonicalUrls":1610,"schema":1611},"Customer Success Scavenger Hunt","The CS team was challenged with a weekend-long scavenger hunt to utilize their creativity and collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681215/Blog/Hero%20Images/rocky.jpg","https://about.gitlab.com/blog/cs-scavenger-hunt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Customer Success Scavenger Hunt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chloe Whitestone\"}],\n        \"datePublished\": \"2020-04-06\",\n      }",{"title":1607,"description":1608,"authors":1613,"heroImage":1609,"date":1615,"body":1616,"category":698,"tags":1617},[1614],"Chloe Whitestone","2020-04-06","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nAs part of our [Contribute](https://about.gitlab.com/events/gitlab-contribute/) event, the Customer Success team had planned to do a scavenger hunt as a team-building activity. I, for one, absolutely love scavenger hunts, so I was thrilled. Unfortunately, Contribute this year was canceled, so I decided to make a virtual scavenger hunt for our team to enjoy instead.\n\n### Planning\n\nHow else would I start planning something at GitLab? 
I created [an issue](https://gitlab.com/gitlab-com/customer-success/tam/issues/212) to document my ideas, then I announced it to my team both synchronously and asynchronously. I wanted this to be as fun and easy as possible, so I only asked two things of my teammates:\n\n1. Vote on a weekend that worked well for them\n2. Join a Slack channel (#cs-scavenger-hunt-2020)\n\n![Annoucing the Scavenger Hunt](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/announcement.png){: .shadow}\n\nI didn't want to disrupt work hours, so this being on a weekend was another reason participating was purely voluntary. I gave everyone a week to vote on a date, then 3 days to join (or leave) the channel. Joining the Slack channel acted as registering for the event. I made sure to send them very important reminders throughout the weeks leading up to it, as well!\n\nMeanwhile, I went to work thinking of social distancing-friendly things I could include on the scavenger hunt. The goal was for team members to be able to take pictures of everything on the list, using creativity and teamwork, without needing to go out into public. I ended up with 40 items, some much more challenging than others, but everything doable and hopefully silly! I had zero expectations of people finishing the list, but some teams came extremely close! I created a [scavenger hunt project](https://gitlab.com/cs-scavenger-hunt/welcome-to-the-jungle/) where I created a README with guidelines and an issue with the full list of their tasks.\n\nOn the Thursday before the scavenger hunt, I gave the participants two critical pieces of information:\n\n1. Their teams - we had a total of 22 participants, so I divided them into 11 groups of 2, trying to be mindful of timezones, as well as trying to pair people I didn't think knew each other very well\n2. 
The link to the scavenger hunt project\n\nThroughout the next day, teams strategized for what tasks each person could do and fostered some healthy competition.\n\n### The Hunt Begins!\n\nThe scavenger hunt officially began at 12am in each team member's local timezone, and there was an element of collaboration required, as some tasks needed help from outside sources.\n\nFor example, one task I expected to be difficult, but the teams got creative quickly! The task was \"Screenshot other people saying the word “collaboration” to you in Slack or Zoom chat (screenshot must include the date & time) - 1 point for every different person, and 10 extra points if it’s from a member of the e-group.\" I woke up Saturday morning to see several posts in our company-wide Slack channel asking for team members to comment with the word \"collaboration\", and even our CEO posted a response!\n\n![Collaboration at Work](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/collaboration.png){: .shadow}\n\nOther tasks ranged from deciphering clues (\"Take a picture of a representation of 26.357896, 127.783809\" - those coordinates link to a picture of the Batman signal, and \"Take a picture of a team member doing the title of v=0UIB9Y4OFPs\" - that string leads to a YouTube video of \"Pour Some Sugar On Me\") to taking pictures of things at home, like them completing a puzzle and finding the oldest expired food item in their pantry.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1aM7Wg7V65I5-YOKfFptybL28vmRUqt90/preview\" width=\"640\" height=\"480\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### The Winners\n\nI requested that all pictures be due before the Tuesday after the scavenger hunt ended, and the teams uploaded their pictures to a Google Drive folder and shared it with me.\n\nOn the backend, I created a spreadsheet to calculate the winners. 
I created a tab for each team, with a link to their pictures at the top, then went through their pictures and entered a score next to each task. Here's how I approached the scoring:\n\n- Tasks with no pictures got 0 points\n- Tasks with pictures that got the spirit of the task but weren't complete or correct got 0.5 points\n- Tasks with pictures that were either complete or correct got 1 point\n- Tasks with pictures that also included other tasks got extra points\n- Tasks with pictures that demonstrated creativity got extra points\n- Tasks with videos as opposed to pictures got extra points\n\nAdditionally, the final task was intentionally vague and as such worth an extra 30 points. The task was \"There is a storage locker that contains a duffel bag filled with the final prize. Each team member has already been given the key. Find it and take a picture with the person who holds the treasure.\" This is a reference to Rat Race, but that didn't help team members at all. The \"storage locker\" was a project within the GitLab group I shared.  The \"duffel bag\" was a .gitlab-ci.yml file in that project. The \"key\" was them receiving access to this project the day the scavenger hunt began. The \"person who holds the treasure\" was a member of our team not participating, Hugo, because the .gitlab-ci.yml file was created with a [Hugo template](https://gitlab.com/pages/hugo/container_registry). Only one team figured out this clue! (No surprise they were also the winners!)\n\n![Sophie, Ove, and Hugo!](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/hugo.jpg){: .shadow}\n\nAfter letting my spreadsheet do the math, the winners were indubitable. With an incredible 200 points, team \"Candy Corn Skeleton\" won the hunt!\n\nBut the goal of this wasn't to have winners - it was to have fun together, build relationships and hopefully bring some joy into our lives, no matter what today's climate looks like. 
So I also ordered special limited-edition Sherlock Holmes-themed GitLab stickers, that I will give to all participants as a thank you.\n\nI can't wait to do this again in the future, hopefully have it be more automated, and with even more people! Keep scrolling for a few of my favorite pictures and videos from this weekend!\n\n![Recreating Abbey Road](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/abbey-road1.jpeg){: .shadow}\n![Recreating Abbey Road](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/abbey-road2.jpg){: .shadow}\n![Favorite Halloween Costume](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/halloween.jpeg){: .shadow}\n![Favorite Quarantine Meal](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/quarantine-meal.jpg){: .shadow.small} ![Silliest Face You Can Make](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/silly-face2.jpg){: .shadow.small}\n![Silliest Face You Can Make](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/silly-face1.jpg){: .shadow}\n![Vulcan Salute](https://about.gitlab.com/images/blogimages/cs-scavenger-hunt/vulcan-salute.png){: .shadow}\n\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1IsSXvlZnKmb7fXjelENoCR127YdRMpbL/preview\" width=\"640\" height=\"480\">\u003C/iframe>\n\u003C/figure>\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1oM8ZoafL1VCb2NQ0wZlWZCu2dF5YTku-/preview\" width=\"640\" height=\"480\">\u003C/iframe>\n\u003C/figure>\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1SzZXX489KSXa2qIjHU2Rt-XA1B69ESys/preview\" width=\"640\" height=\"480\">\u003C/iframe>\n\u003C/figure>\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1RtxLWNQY2av5O-sXy-MSK6f8XP_zmBVe/preview\" width=\"640\" 
height=\"480\">\u003C/iframe>\n\u003C/figure>\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1CHtoKEUIiZqAbeSUbCKMnRAIJUTOM_2p/preview\" width=\"640\" height=\"480\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9],{"slug":1619,"featured":6,"template":680},"cs-scavenger-hunt","content:en-us:blog:cs-scavenger-hunt.yml","Cs Scavenger Hunt","en-us/blog/cs-scavenger-hunt.yml","en-us/blog/cs-scavenger-hunt",{"_path":1625,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1626,"content":1632,"config":1638,"_id":1640,"_type":14,"title":1641,"_source":16,"_file":1642,"_stem":1643,"_extension":19},"/en-us/blog/database-case-study-store-and-update-namespace-statistics",{"title":1627,"description":1628,"ogTitle":1627,"ogDescription":1628,"noIndex":6,"ogImage":1629,"ogUrl":1630,"ogSiteName":667,"ogType":668,"canonicalUrls":1630,"schema":1631},"Store and update namespace statistics in a performant manner","Explore all the different engineering approaches to store and update the namespace statistics in a performant manner.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672677/Blog/Hero%20Images/metalgears_databasecasestudy.jpg","https://about.gitlab.com/blog/database-case-study-store-and-update-namespace-statistics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Store and update namespace statistics in a performant manner\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayra Cabrera\"}],\n        \"datePublished\": \"2019-10-14\",\n      }",{"title":1627,"description":1628,"authors":1633,"heroImage":1629,"date":1635,"body":1636,"category":743,"tags":1637},[1634],"Mayra Cabrera","2019-10-14","\nManaging storage space on large GitLab instances, such as GitLab.com, can be a challenge. 
At the moment, we only have a restriction on repository limits, but no restriction on most of the other items that can consume storage space: wiki, lfs objects, artifacts, and packages, to mention a few.\n\nWe want to facilitate a method for easily viewing the amount of storage consumed by a group and allow easy management on GitLab.com by setting [storage and limits management for groups](https://gitlab.com/groups/gitlab-org/-/epics/886). But to do that we need a way to track the statistics of a namespace, whether it is a Group or a User namespace.\n\n## Proposal to track the statistics of a namespace\n\n1. Create a new ActiveRecord model to hold the namespaces' statistics in an aggregated form: Only for root namespaces.\n2. Refresh the statistics in this model every time a project belonging to this namespace is changed.\n\nThe \"refresh\" part is the tricky one. Currently we don't have a pattern to update/refresh the namespace statistics every time a project belonging to the namespace is updated.\n\nWe refreshed projects statistics in the following way:\n\n1. We have a model called `ProjectStatistics`,\n2. The records on `ProjectStatistics` are updated through a [callback](https://gitlab.com/gitlab-org/gitlab-ce/blob/v12.2.0.pre/app/models/project.rb#L90) every time the project is saved.\n3. The summary of those statistics per namespace is then retrieved by [`Namespaces#with_statistics`](https://gitlab.com/gitlab-org/gitlab-ce/blob/v12.2.0.pre/app/models/namespace.rb#L70) scope.\n\nAnalyzing this query we noticed that:\n\n- It takes up to `1.2` seconds for namespaces with over `15 000` projects.\n- Any attempt to run `EXPLAIN ANALYZE` results in query timeouts (15 seconds) when using our internal tooling.\n\nAdditionally, the callback to update the project statistics doesn't scale. It is currently one of the most [frequently run and expensive database queries](https://gitlab.com/gitlab-org/gitlab-ce/issues/62488) on GitLab.com. 
We can't add one more query to it as\nit will increase the transaction's length.\n\nBecause of these reasons, we can't apply the same pattern to store\nand update the namespaces' statistics, as the `namespaces` table is one\nof the largest tables on GitLab.com. Therefore, we have to find a performant and\nalternative method.\n\n## Our Attempts\n\n### Attempt A: PostgreSQL materialized view\n\nUpdate the ActiveRecord model with a refresh strategy based on project routes and a [materialized view](https://www.postgresql.org/docs/9.6/rules-materializedviews.html):\n\n```sql\nSELECT split_part(\"rs\".path, '/', 1) as root_path,\n        COALESCE(SUM(ps.storage_size), 0) AS storage_size,\n        COALESCE(SUM(ps.repository_size), 0) AS repository_size,\n        COALESCE(SUM(ps.wiki_size), 0) AS wiki_size,\n        COALESCE(SUM(ps.lfs_objects_size), 0) AS lfs_objects_size,\n        COALESCE(SUM(ps.build_artifacts_size), 0) AS build_artifacts_size,\n        COALESCE(SUM(ps.packages_size), 0) AS packages_size\nFROM \"projects\"\n    INNER JOIN routes rs ON rs.source_id = projects.id AND rs.source_type = 'Project'\n    INNER JOIN project_statistics ps ON ps.project_id  = projects.id\nGROUP BY root_path\n```\n\nWe could then execute the query with:\n\n```sql\nREFRESH MATERIALIZED VIEW root_namespace_storage_statistics;\n```\n\nWhile this implied a single query update, it has some downsides:\n\n- The query itself would not be fast, as it would need to update all the statistics every time it runs. Execution time of this query will increase as the number of namespaces and projects grow.\n- Materialized views syntax varies from PostgreSQL and MySQL. At the time this feature was worked on, [GitLab still supported MySQL, which it now no longer supports.](/blog/removing-mysql-support/).\n- Rails does not have native support for materialized views. 
We'd need to use a specialized gem to take care of the management of the database views, which implies additional work.\n\n### Attempt B: An update through a CTE\n\nUpdate the ActiveRecord model with a refresh strategy through a [Common Table Expression](https://www.postgresql.org/docs/9.1/queries-with.html).\n\n```sql\nWITH refresh AS (\n  SELECT split_part(\"rs\".path, '/', 1) as root_path,\n        COALESCE(SUM(ps.storage_size), 0) AS storage_size,\n        COALESCE(SUM(ps.repository_size), 0) AS repository_size,\n        COALESCE(SUM(ps.wiki_size), 0) AS wiki_size,\n        COALESCE(SUM(ps.lfs_objects_size), 0) AS lfs_objects_size,\n        COALESCE(SUM(ps.build_artifacts_size), 0) AS build_artifacts_size,\n        COALESCE(SUM(ps.packages_size), 0) AS packages_size\n  FROM \"projects\"\n        INNER JOIN routes rs ON rs.source_id = projects.id AND rs.source_type = 'Project'\n        INNER JOIN project_statistics ps ON ps.project_id  = projects.id\n  GROUP BY root_path)\nUPDATE namespace_storage_statistics\nSET storage_size = refresh.storage_size,\n    repository_size = refresh.repository_size,\n    wiki_size = refresh.wiki_size,\n    lfs_objects_size = refresh.lfs_objects_size,\n    build_artifacts_size = refresh.build_artifacts_size,\n    packages_size  = refresh.packages_size\nFROM refresh\n    INNER JOIN routes rs ON rs.path = refresh.root_path AND rs.source_type = 'Namespace'\nWHERE namespace_storage_statistics.namespace_id = rs.source_id\n```\n\nUnlike Attempt A, a CTE will be limited to the namespace we care about instead of operating on all namespaces. 
The downside is that earlier\nversions of MySQL do not support Common Table Expressions.
The [background migration will take approximately 153 hours](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/29772#note_182201607).\n- The background migration has to be shipped one release before we want to start using the new data, delaying the functionality by another milestone.\n\n### Attempt E: Update the namespace storage statistics asynchronously\n\nFor this approach we continue using the incremental statistics updates we already have,\nbut we refresh them through Sidekiq jobs and in different SQL transactions:\n\n1. Create a second table (`namespace_aggregation_schedules`) with two columns `id` and `namespace_id`.\n1. Whenever the statistics of a project changes, insert a row into `namespace_aggregation_schedules`\n   - We don't insert a new row if there's already one related to the root namespace.\n   - Keeping in mind the length of the transaction that involves [updating `project_statistics`](https://gitlab.com/gitlab-org/gitlab-ce/issues/62488), the insertion should be done in a different transaction and through a Sidekiq Job.\n1. After inserting the row, we schedule another worker to be executed async at two different moments:\n   - One enqueued for immediate execution and another one scheduled in `1.5h` hours.\n   - We only schedule the jobs if we can obtain a `1.5h` lease on Redis on a key based on the root namespace ID.\n   - If we can't obtain the lease it indicates there's another aggregation already in progress or scheduled in no more than `1.5h`.\n1. This worker will:\n   - Update the root namespace storage statistics by querying all the namespaces through a service.\n   - Delete the related `namespace_aggregation_schedules` after the update.\n1. 
Another Sidekiq job is also included to traverse any remaining rows on the `namespace_aggregation_schedules` table and schedule jobs for every pending row.\n   - This job is scheduled with cron to run every night (UTC).\n\nThis implementation has the following benefits:\n\n- All the updates are done async, so we're not increasing the length of the transactions for `project_statistics`.\n- We're doing the update in a single SQL query.\n- It is compatible with PostgreSQL and MySQL.\n- No background migration is required.\n\nThe downsides of this approaches are:\n\n* Namespaces' statistics are updated up to `1.5` hours after the change is done, which means there's a brief window in time where the statistics are inaccurate. This is not a major problem because we're not currently [enforcing storage limits](https://gitlab.com/gitlab-org/gitlab-ce/issues/30421).\n* From the implementation perspective, this approach is more complex than the migration approach (Attempt D).\n* `namespace_aggregation_schedules` table will see a high rate of inserts and deletes, which may require that we tune auto vacuuming for this table.\n\nWe went with *Attempt E* because updating the storage statistics asynchronously was the less problematic and\nperformant approach of aggregating the root namespaces.\n\n## Enabling the feature on GitLab.com\n\nGiven this is a performance improvement, we have to be very careful introducing this change to GitLab.com: Which is why\nwe decided to release it under [feature flag](https://docs.gitlab.com/ee/development/feature_flags/) and roll it out gradually by:\n\n1. Enable it on our staging environment and measure the performance.\n2. Enable it on GitLab.com on different periods for the `gitlab-org` group and measure the performance.\n3. 
Enable it globally on GitLab.com on different periods and measure the performance.\n\nFinally if no problem arises, we can be confident this change performs properly on GitLab.com and we can\nremove the feature flag.\n\n## Measuring the performance\n\nTo assess the execution of this approach, we monitored the [Sidekiq dashboards](https://dashboards.gitlab.com/d/9GOIu9Siz/sidekiq-stats?orgId=1) on Kibana to ensure jobs were being executed flawlessly and without using too much memory or CPU. Particularly, we observed the \"Sidekiq queue size,\" \"Rate of running jobs,\" and \"Running jobs\" dashboards.\n\n### On staging\n\nThe feature was enabled globally on staging and the execution of the jobs was satisfactory. But there was barely any traffic to measure the impact of our changes:\n\n![Graph showing the queue size of the ScheduleAggregationWorker on staging](https://about.gitlab.com/images/blogimages/namespace_statistics/staging-1.png){: .shadow.medium.center}\n\n### Enabling root namespaces on GitLab.com\n\nOur results were different on GitLab.com. 
We first enabled it for the `gitlab-org` group and we quickly started to observe more traffic:\n\n![Graph showing the queue size of the ScheduleAggregationWorker on GitLab.com](https://about.gitlab.com/images/blogimages/namespace_statistics/production-1.png){: .shadow.medium.center}\n\n![Graph showing the running jobs of the ScheduleAggregationWorker on GitLab.com](https://about.gitlab.com/images/blogimages/namespace_statistics/production-2.png){: .shadow.medium.center}\n\nOnce we enabled the feature flag globally, the rate of running jobs increased considerably:\n\n![Graph showing the rate running jobs of the ScheduleAggregationWorker on GitLab.com](https://about.gitlab.com/images/blogimages/namespace_statistics/production-3.png){: .shadow.medium.center}\n\n![Graph showing the rate running jobs of the RootStatisticsWorker on GitLab.com](https://about.gitlab.com/images/blogimages/namespace_statistics/production-4.png){: .shadow.medium.center}\n\n## Root namespaces on GitLab.com today\n\nWe currently have nearly `400 000` statistics stored for root namespaces on GitLab.com, which are updated at a high pace.\nBeing able to efficiently fetch those statistics allows one to easily track the top biggest repositories and/or namespaces on an instance\nand to start paving the way to enforce storage limits for groups on GitLab.com.\n\nLearn more about this use case by reading:\n\n- [The original issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/62214)\n- [Merge Request with the implementation](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/28996)\n- [Details of the performance measured against staging and production (GitLab.com)](https://gitlab.com/gitlab-org/gitlab-ce/issues/64092)\n\nCover photo by [Bill Oxford](https://unsplash.com/@bill_oxford?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/engineering?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: 
.note}\n",[9],{"slug":1639,"featured":6,"template":680},"database-case-study-store-and-update-namespace-statistics","content:en-us:blog:database-case-study-store-and-update-namespace-statistics.yml","Database Case Study Store And Update Namespace Statistics","en-us/blog/database-case-study-store-and-update-namespace-statistics.yml","en-us/blog/database-case-study-store-and-update-namespace-statistics",{"_path":1645,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1646,"content":1652,"config":1658,"_id":1660,"_type":14,"title":1661,"_source":16,"_file":1662,"_stem":1663,"_extension":19},"/en-us/blog/day-in-life-of-remote-sdr",{"title":1647,"description":1648,"ogTitle":1647,"ogDescription":1648,"noIndex":6,"ogImage":1649,"ogUrl":1650,"ogSiteName":667,"ogType":668,"canonicalUrls":1650,"schema":1651},"A day in the life of a remote Sales Development Representative","Working as a remote SDR is a fulfilling career that enables flexibility, a positive work/life balance, and encourages strong bonds with team members.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680115/Blog/Hero%20Images/day-in-life-remote-sdr.jpg","https://about.gitlab.com/blog/day-in-life-of-remote-sdr","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A day in the life of a remote Sales Development Representative\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Miranda\"}],\n        \"datePublished\": \"2018-05-11\",\n      }",{"title":1647,"description":1648,"authors":1653,"heroImage":1649,"date":1655,"body":1656,"category":808,"tags":1657},[1654],"Michael Miranda","2018-05-11","\n\nSales Development Representatives (SDRs) are the frontline forces built to march through endless rejection and cold calls to people who may not have heard of a product or who may already be committed to a solution. 
SDRs help an organization generate revenue by playing a crucial role in actively searching for customers who could benefit from our products and working to set up meetings between decision makers, solutions architects, and account executives. Across all industries, this is a difficult but rewarding path, since we have to use a variety of techniques to quickly establish a meaningful rapport and open a line of communication. Essentially, an SDR is a bridge between new customers and the product.\n\nIf you're familiar with the SDR role, then you have a basic understanding of what a typical day looks like. But, I don’t work at a typical organization. I work at GitLab, and I want to introduce you to life as a [remote SDR](https://handbook.gitlab.com/job-families/marketing/sales-development-representative/). Yes, there are some challenges, and yes, it does look different, but I wouldn’t have it any other way.\n\n## Getting started\n\nNormally, I wake up and check my email, Slack, and my calendar to know what I have for the day. Then, I take my daily three-step commute to the office; that’s right, three whole steps (studio apartment, don’t judge). Or, I’ll take a seven-minute walk to the coworking space that GitLab pays for. As a remote SDR, I can work from anywhere!\n\nThe activity kicks off with a daily standup meeting with my team, where we plan to discuss serious goals for the day but end up laughing about stories shared from the previous one. Our camaraderie helps to let off some steam and serves as a daily reminder that we’re all in this together. After our standup, we sync up with the rest of the company for the [Team Call](/handbook/communication/#team-call), which gives us a glimpse into the lives of other ’Labbers across the world. I may have a couple of meetings after the call, but here at GitLab, we’re not bogged down by countless internal meetings. 
We’re given the time to focus on what’s really important: crushing quota!\n\n## Working collaboratively\n\nAfter doing my research, I’ll make my calls and send out emails throughout the day. If I’m stuck on something, I’ll reach out to my team, hop on a video call with someone, and collaborate to come up with a strategy or solution. One of the most refreshing aspects about working at GitLab is that everyone is willing to help each other. We’re encouraged to build bonds with each other, so I schedule regular [coffee chats](/company/culture/all-remote/tips/#coffee-chats) to make a new friend or catch up with an old one. Everyone from our CEO to our newest ’Labber is happy to join a coffee call. In my first week, I messaged our [Chief People Officer](https://handbook.gitlab.com/job-families/people-group/chief-people-officer/), and we chatted the next day. I’ve never worked at an organization that encouraged its executive team to make themselves so available to others.\n\nIn addition to collaboration, GitLab values flexibility. If I have a doctor’s appointment, errands to run, or just decide that I want to take a longer lunch and embrace food coma, I have the freedom to do so. As a results-driven organization, GitLab is concerned about your productivity – not your hours.\n\n## Physical proximity isn’t necessary\n\nThroughout the day, my [account executive](https://handbook.gitlab.com/job-families/sales/account-executive) (AE) and I have a 1:1 call, depending on activity. At GitLab, each SDR is paired with a knowledgeable, experienced AE. My AE and I have a strong relationship, and we’re constantly communicating throughout the day. We send articles to each other, share random ideas, and drop in the occasional (ok, maybe frequent is the better word here) hilarious GIF or video. I feel like we’ve known each other forever, and we’ve never even met in person. He’s in Kansas, and I’m in Los Angeles! 
When I first started working at GitLab, I wasn’t sure how easy it would be to communicate with others across time zones, but with all the tools and processes that GitLab has developed, collaborating with him feels natural and easy. We may not work in the same building, but we have developed a great relationship.\n\n>I wasn’t sure how easy it would be to communicate with others across time zones, but with all the tools and processes that GitLab has developed, collaborating feels natural and easy\n\nThe freedom to work wherever I want allows me to minimize distractions and control noise levels, something that many SDRs are unable to do in a traditional office setting. I am able to completely focus on making calls, connecting with customers, and conducting research without getting distracted by the buzzing conversations of other SDRs around me. If I worked in an office, I would also be subjected to the challenges of context switching if a fellow SDR unexpectedly stopped by my cubicle to discuss a call or ask for help. While it may take more effort to build camaraderie with team members when working remotely, I believe that a remote work environment is more conducive to an SDR role, since noise and distraction can make potential customers feel unimportant.\n\nI wrap up each day by looking over my tasks and setting myself up for the next day. Sometimes I cut my day shorter (yay for Friday!) or start later (~~yay for~~ Monday!). The beautiful thing is that we focus on results, not the amount of hours teammates put in. I’m not forced to clock in at a certain time or wait to clock out even though my work is done. Remote and SDR may be two words you’d never thought would be a good fit, but I’m here to tell you that it fits well. I’ve been grateful to learn and experience that physical proximity isn’t required to develop strong bonds, deliver results, and feel immersed in a company culture. 
GitLab empowers their frontline with tools to facilitate camaraderie, helping the SDR team march forward to success.\n\nDoes working as a remote SDR appeal to you? We're hiring across multiple time zones – check out [our job listings](/jobs/).\n\nPhoto by [rawpixel.com](https://unsplash.com/) on [Unsplash](https://unsplash.com/search/photos/business?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[832,9],{"slug":1659,"featured":6,"template":680},"day-in-life-of-remote-sdr","content:en-us:blog:day-in-life-of-remote-sdr.yml","Day In Life Of Remote Sdr","en-us/blog/day-in-life-of-remote-sdr.yml","en-us/blog/day-in-life-of-remote-sdr",{"_path":1665,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1666,"content":1672,"config":1678,"_id":1680,"_type":14,"title":1681,"_source":16,"_file":1682,"_stem":1683,"_extension":19},"/en-us/blog/day-in-the-life-remote-worker",{"title":1667,"description":1668,"ogTitle":1667,"ogDescription":1668,"noIndex":6,"ogImage":1669,"ogUrl":1670,"ogSiteName":667,"ogType":668,"canonicalUrls":1670,"schema":1671},"A day in the life of the \"average\" remote worker","Go on, you know you're curious! 
Explore a day in the life of GitLab team members from around the world.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670134/Blog/Hero%20Images/remote-life-cover.png","https://about.gitlab.com/blog/day-in-the-life-remote-worker","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A day in the life of the \"average\" remote worker\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"},{\"@type\":\"Person\",\"name\":\"Charlie Ablett\"}],\n        \"datePublished\": \"2019-06-18\",\n      }",{"title":1667,"description":1668,"authors":1673,"heroImage":1669,"date":1675,"body":1676,"category":808,"tags":1677},[672,1674],"Charlie Ablett","2019-06-18","\nGitLab is an [all-remote company](/company/culture/all-remote/), meaning we are not based in one location or even one time zone. Instead, our team is distributed in home offices and work spaces [across the globe](/company/team/#countries), everywhere from San Francisco to London to Taipei.\n\nBecause GitLab is not limited to one time zone, we work asynchronously. Our asynchronous workflow gives us a [competitive advantage](/blog/remote-enables-innovation/), because we are contributing 24 hours a day, as opposed to the standard 9am-5pm if we had a brick-and-mortar office headquartered in one location. As an organization, the focus is not on when or how a team member works, but rather on our [results](https://handbook.gitlab.com/handbook/values/#results).\n\nBecause of this emphasis on results rather than regimen, there is a lot of variability in how we structure our workdays. At [Contribute 2019](/events/gitlab-contribute/), a group of us came together to discuss how we use the flexibility GitLab affords us to structure our ideal workdays. 
Our discussion featured team members working in different capacities, as engineers, writers, and managers, from many different locations.\n\n## Morning\n\nThere are a few morning activities that were universal: A warm cup of coffee or tea to kick off the day; and morning cuddles with a cat or dog if you have the good fortune of having a pet.\n\n\"When my alarm goes off, Milly, my dog, who hates getting out of bed, snuggles closer to me. I get up, make coffee, and log on to begin working. Meanwhile, Milly is usually still in bed until 10:30am, sometimes even 11:30am,\" says [Sara Kassabian](/company/team/#sarakassabian), content editor, from Oakland, California.\n\nFor some of us, sunshine (or other humans) function as the morning wake-up call.\n\n\"I stopped setting an alarm because mornings are quiet in my time zone, and (inevitably) I get woken by my upstairs neighbors getting ready for work anyway! I make a big cup of coffee and try to get all my deep focus work done before my coworkers in the US start to wake up,\" says [Rebecca Dodd](/company/team/#rebecca), managing editor, from London.\n\n\"I also do not set an alarm because I often work until late. Usually my kids wake me up at some point, and then I will have a big cup of tea,\" says [Charlie Ablett](/company/team/#cablett), a senior backend engineer, [Plan](/handbook/engineering/development/dev/plan/), from New Zealand.\n\nMornings can be a particularly hectic time for working parents. Oftentimes, parents who don't work remotely will have to juggle getting their children ready for school, getting ready for work, and making school drop-off in time to get to the office between 8am-9am. Parents working at GitLab have the opportunity to be in the home and available to their children because we're [all remote](/blog/building-an-award-winning-culture-at-gitlab/). Flexible scheduling makes it a little bit easier to balance family with work obligations.\n\n## Midday\n\nWe all structure our afternoons differently. 
Some of us have children to pick up from school, while others are just starting their workday, or taking a break from the computer to run errands or exercise.\n\n\"I usually take a break to go running or to walk my dog. Then I’ll pick up my kids from school. I usually have one or two more screening calls and some team meetings,\" says [Stephanie Garza](/company/team/#stephaniegarza), diversity sourcing specialist, from Michigan.\n\n\"I start my workday in the afternoon by checking Slack and emails. I may go for a walk. I might work out then start focused work at 3pm or so,\" says [Laura Montemayor](/company/team/#lauraMon), frontend engineer, [Monitor](/handbook/engineering/frontend/monitor/), from New York City.\n\nWeather is also a big determinant about whether work or play is on the agenda for the afternoon.\n\n\"It depends on whether or not the weather is nice or if I have plans in the evening. If it’s sunny in New York City, you have to go outside. If it’s nice I want to go enjoy the weather! Or if I’m going out in the evening I’ll get my work done first,\" says Avielle Wolfe, backend engineer, [Secure](/handbook/engineering/development/sec/secure/), from New York City.\n\n\"If it’s sunny in Oakland, I will take Milly for a longer walk, which gives me some Vitamin D and the boost of energy I’ll need to finish up any remaining tasks for the day,\" added [Sara](/company/team/#sarakassabian).\n\nNot every team member lives in a location as urban as Oakland or New York City. Some live in suburban neighborhoods or more rural locations, all of which can have an impact on how we structure our day. For instance, [Charlie](/company/team/#cablett), who lives in a more rural setting, once had to set aside an hour around 4pm each day to milk her cows.\n\n## Evening\n\nFor those of us with children, the evenings are the ideal time to set work aside and focus on family time.\n\n\"My evening typically begins with practice. My daughter does soccer and my son does karate. 
My husband works a weird schedule so this is my alone time with the kids. I will make dinner and then get some more work done sometime between 8-10pm,\" says [Stephanie](/company/team/#stephaniegarza).\n\nIf our workday started in the afternoon as opposed to the morning, there are often more tasks to be completed throughout the evening.\n\n\"I am still working by evening,\" says [Laura](/company/team/#lauraMon). \"I’ll have a meal around 8pm. If I have plans, I go out, otherwise I play video games. If I get a second wind I’ll work more after 10pm.\"\n\n\"I try to finish my work by 6pm, but if I work overtime then the next day I will have an excuse to relax a little bit! In the evenings, I’ll cook dinner by putting some chicken and veggies into a steamer pot, and then continue working while that cooks, or I will go out for dinner. Sometimes I’ll attend local meetups, or just relax and watch TV. My bedtime is around midnight,\" says [Mark Chao](/company/team/#lulalala_it), backend engineer, [Create](/handbook/engineering/development/dev/create/), from Taipei.\n\n## Focused versus collaborative work\n\nGitLab gives us the flexibility to build a custom schedule, so early birds and night owls can work when they feel they are most effective. When we choose to work in tandem with teammates and when we do our focus work depends primarily upon two factors: When the overlap happens across teams and time zones, and also when we are most focused and/or creative.\n\n\"Europe and the Americas are chatty overnight so I have lots to catch up on in the morning, including the minutes of meetings that happened at 3am (e.g., daily company call),\" says [Charlie](/company/team/#cablett). 
\"America is still awake so I collaborate with them if I need to, and I do all my deep focus work later on when not many folks are around.\"\n\nThough GitLab has a globally distributed team across 54 countries and regions, the majority of us are based in the United States and Europe.\n\n\"After lunch, I get maybe one more hour of focused work in until 3pm when America wakes up. Then meetings start, Slack gets busy, and then I'm trying to disentangle myself and switch off for the evening,\" says [Rebecca](/company/team/#rebecca). \"If something doesn’t happen before 3pm, it generally doesn’t happen that day.\"\n\n\"In the afternoon for me, people will start to wake up and log on so I will have more interactions and working on issues,\" says [Mark](/company/team/#lulalala_it).\n\nSometimes team members with children will log on to complete a few more hours of work while the children are sleeping, generally between 8pm-10pm, and sometimes after 10pm.\n\n## Family first at GitLab\n\n\"I love working at GitLab for a variety of reasons, but the flexibility in creating work-life harmony in my life tops my list. I work closely with our executive team here, and they have been so supportive and encouraging when family-related conflicts arise. They are constantly reminding me that \"[family first](https://handbook.gitlab.com/handbook/values/#family-and-friends-first-work-second)\" is our mantra, and give me ease of mind to take time away when needed,\" says [Cheri Holmes](/company/team/#cheriholmes), manager, executive assistant, from Dublin, California, in a previous [blog post](/blog/building-an-award-winning-culture-at-gitlab/).\n\nInc. Magazine recently ranked GitLab as one of the [best places to work](/blog/building-an-award-winning-culture-at-gitlab/), due in large part to a company culture that gives team members the agency to balance our personal and professional obligations. 
While the \"average\" team member may not share a schedule, we do share a commitment to our [values](https://handbook.gitlab.com/handbook/values/#credit): Collaboration, results, efficiency, diversity, iteration, and transparency. In order to work asynchronously effectively, everyone must embrace and embody the values of our organization.\n",[677,832,811,9],{"slug":1679,"featured":6,"template":680},"day-in-the-life-remote-worker","content:en-us:blog:day-in-the-life-remote-worker.yml","Day In The Life Remote Worker","en-us/blog/day-in-the-life-remote-worker.yml","en-us/blog/day-in-the-life-remote-worker",{"_path":1685,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1686,"content":1692,"config":1699,"_id":1701,"_type":14,"title":1702,"_source":16,"_file":1703,"_stem":1704,"_extension":19},"/en-us/blog/deep-dive-into-gitlabs-ux-design-process",{"title":1687,"description":1688,"ogTitle":1687,"ogDescription":1688,"noIndex":6,"ogImage":1689,"ogUrl":1690,"ogSiteName":667,"ogType":668,"canonicalUrls":1690,"schema":1691},"A deep dive into GitLab's UX design process","The UX team shares how they communicate, plan, share, and tackle improvements one iteration at a time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678759/Blog/Hero%20Images/designwebcast.jpg","https://about.gitlab.com/blog/deep-dive-into-gitlabs-ux-design-process","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A deep dive into GitLab's UX design process\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-09-05\",\n      }",{"title":1687,"description":1688,"authors":1693,"heroImage":1689,"date":1694,"body":1695,"category":743,"tags":1696},[930],"2018-09-05","\nThe [UX team](/handbook/product/ux/#ux-at-gitlab) recently gathered to share\nhow they collaborate in a fully remote environment. 
Our team of two UX researchers\nand nine UX designers spans eight countries and six time zones. In this webcast,\nthey discussed UX research, community contributions, and hiring, making it an\nexcellent resource in helping you learn more about\n[GitLab design](https://gitlab.com/gitlab-org/gitlab-design/#gitlab-design).\n\n### Watch the webcast\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6R64hHkkgtE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What we covered\n\nThe UX team generously provided insight into their workflow and projects. Below\nare a few of our favorite takeaways.\n\n### Iteration\n\nAt GitLab, [iteration](https://handbook.gitlab.com/handbook/values/#iteration) means making the smallest\nthing possible and getting it out as quickly as possible, helping us reduce the\ncycle time and rapidly get feedback from users so that we can continue to improve\nquickly and efficiently. Planning too far ahead without getting real-world\nfeedback can cause you to build something that doesn't meet user needs.\n\n### UX Research\n\nThe goal of UX research is to understand the needs and concerns of users, often\nby observing how they interact with a product or by gathering data through\nvarious methods. At GitLab, we often use survey research, feasibility testing,\nuser interviews, and card sorting to understand our users. We discuss the\nresults with product managers to help us prioritize feedback and determine the\nnext steps to implement the findings.\n\n### GitLab Design System\n\nOne of the team's major initiatives last year was  the\n[GitLab Design System](https://design.gitlab.com/), which\nincludes content guidelines, usability patterns, foundational styles, and reusable\ncomponents. The team shifted its focus towards system thinking to create\nconsistency throughout the product and predictability across experiences. 
The UXers\nhave been working closely with our frontend team to implement our system\niteratively.\n\nEvery designer writes usage guidelines during every milestone and\npicks at least one issue within the issue tracker to contribute to the project.\nThe design system is open source, just like the rest of GitLab, so everyone is\nencouraged to question any of the decisions we've made or contribute by making\nthings clearer or adding missing content.\n\n### How you can contribute to GitLab’s UX designs\n\nAs an open source company, we believe in transparency, so we share almost\neverything we do, including source files, artifacts, deliverables, case studies,\n[UX research](https://gitlab.com/gitlab-org/ux-research#research-archive), and\nour findings. Being open source allows the community to learn from us, and for\nus to learn from the community. There are issues that have been\nlabeled '[Accepting merge requests](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=Accepting+merge+requests&label_name[]=UX)'\nand they need some UX work. Most of these are very small issues, making them the\nperfect starting point for first-time contributors. If you have an idea for a UX\nimprovement, we encourage you to create an issue using the feature proposal\ntemplate to describe the problem you're trying to solve and your proposed solution.\n\nOur UX researchers encourage community contributions, so if you're interested\nin exploring a research question, you're welcome to create an issue using a\nsearch proposal template in the\n[UX research project](https://gitlab.com/gitlab-org/ux-research#contributing).\nIf you’d like to help shape the future of GitLab, we’d love to invite you to\njoin [GitLab First Look](/community/gitlab-first-look/).\n\nThe UX team is happy to chat with you about your contribution,\nand we'll try to get back to you as soon as we can.\n\n### Join us!\n\nOur UX team is growing, and we'd love to work with you! 
We're currently looking\nfor three UX designers with an interest in our products. So, whether that's the\ndevelopment side or the operations side, we have a lot going on, and we have\nsomething for everyone. We're recruiting for specific teams, including Release\nand Verify, Monitor, and Secure teams. If you're interested in working with our\ntalented (and fun!) UX team, we encourage you [to apply](/jobs/)!\n\n[Cover image](https://unsplash.com/photos/MGBgTX1Zmpo) by [Chris Barbalis](https://unsplash.com/@cbarbalis), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n",[9,700,1697,1698],"webcast","design",{"slug":1700,"featured":6,"template":680},"deep-dive-into-gitlabs-ux-design-process","content:en-us:blog:deep-dive-into-gitlabs-ux-design-process.yml","Deep Dive Into Gitlabs Ux Design Process","en-us/blog/deep-dive-into-gitlabs-ux-design-process.yml","en-us/blog/deep-dive-into-gitlabs-ux-design-process",{"_path":1706,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1707,"content":1713,"config":1719,"_id":1721,"_type":14,"title":1722,"_source":16,"_file":1723,"_stem":1724,"_extension":19},"/en-us/blog/deep-dive-investigation-of-gitlab-packages",{"title":1708,"description":1709,"ogTitle":1708,"ogDescription":1709,"noIndex":6,"ogImage":1710,"ogUrl":1711,"ogSiteName":667,"ogType":668,"canonicalUrls":1711,"schema":1712},"A deep dive into how we investigate and secure GitLab packages","Supply chain attacks aren't new, but that doesn't mean extra vigilance and protection aren't needed. 
We take a look at how we secure our packages and registries.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682004/Blog/Hero%20Images/gabriel-sollmann-unsplash.jpg","https://about.gitlab.com/blog/deep-dive-investigation-of-gitlab-packages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A deep dive into how we investigate and secure GitLab packages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vitor Meireles De Sousa\"}],\n        \"datePublished\": \"2021-05-27\",\n      }",{"title":1708,"description":1709,"authors":1714,"heroImage":1710,"date":1716,"body":1717,"category":720,"tags":1718},[1715],"Vitor Meireles De Sousa","2021-05-27","\n\nRecent high-profile supply chain and dependency confusion attacks have been a cross-industry wake-up call on the impact breadth and depth these value-chain or third-party attacks can have on customers, business operations, and brand reputation.\nSecurity teams know supply chain attacks aren't new – they've been around for decades. But, what may have once been considered mainly nation-state threats have now increased in prevalence and sophistication. Malicious actors are now setting their sights on widely used technology like software applications and code repositories to compromise unsuspecting suppliers.\n\n## So how do we protect our customers and product?\n\nWe're doing deep dives and making improvements across our product, processes, and practices as well as the controls we have in place for our partner and third-party vendor ecosystem to fortify the security of our supply chain. 
This blog post details our early steps to ensure packages and registries operate the way we expect them to and are continually monitored and secured.\n\nBack in December of 2020, we talked about the work our [Security Research](/handbook/security/#security-research) team is doing to identify malicious packages through the development of a [tool called Package Hunter](/blog/how-we-made-gitlab-more-secure-in-twenty-twenty/). Package Hunter uses dynamic behavior analysis to identify malicious packages that try to exfiltrate sensitive data or run unintended code. It's currently running in our internal pipelines at GitLab, providing our code reviewers with valuable information when reviewing dependency updates. We currently plan to open source Package Hunter in the near future (watch this space!) and integrate it with [GitLab CI](/topics/ci-cd/), so that you can run it in your own pipelines. By making Package Hunter available to the wider community, we hope to put users in a position to proactively detect unexpected dependency behavior, such as the behavior exhibited in the recent Dependency Confusion attacks, and contribute to the security of CI environments.\n\n## A look at GitLab package managers\n\nGitLab has an [open core](/company/stewardship/) business model and is proud to ship open and source-available source code which has been built in part by members of the GitLab community.\n\nTo help our customers in their development process, GitLab offers several package managers, but we mainly use three programming languages:\n\n* Ruby\n* Javascript\n* Go\n\nWe also provide package registries for different types of packages managers, the following being the most popular:\n\n* Composer\n* Conan\n* Go\n* Maven\n* Npm\n* NuGet\n* Pypi\n\nAs well as a container registry (to store Docker images) and a storage proxy for your frequently-used Docker images.\n\n### How dependency confusion happens\n\nAs we saw in the recent [high-profile novel supply chain 
attacks](https://medium.com/@alex.birsan/dependency-confusion-4a5d60fec610), dependency confusion attacks are a logic flaw in the default way that software development tools pull in third-party packages from public and private repositories. Malicious actors can exploit this issue and \"trick\" an environment into pulling in a malicious package instead of the intended custom package.\n\nFor a dependency confusion to happen, there are some conditions that need to be met, like:\n\n* The existence of a private package that has not yet been published to an official package registry (i.e., https://npmjs.org)\n* A package manager client configured in a way that prefers the official package registry\n\nWhile controlling the user environment is challenging, we can and should make sure that the behavior of our GitLab package registries is as intended and secure.\n\n### Investigating the behavior of package registries\n\nTo investigate, we opened an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/321423) to review the behavior of our package registries and also some more dangerous aspects like the ability to run pre/post install scripts, override packages that are supported by package managers or using `--extra-index-url` with PyPi. Check out these instructions on [how to install a PyPI package](https://docs.gitlab.com/ee/user/packages/pypi_repository/#install-a-pypi-package).\n\n#### The TL;DR on our package registry checks\n\nLong story short: Of the multiple packages GitLab offers, only the npm package registry checks the official package registry, [npmjs.org](https://www.npmjs.com/package/npmjs.org), and this comes after verifying the presence of a package on gitlab.com. This means the implementations of our package managers follow best practices! 💪\n\nAnother interesting area we explored more deeply is the variety of ways one could maliciously use a package to interact or obtain information from a system. 
Thankfully, we are already checking for suspicious behavior like this with Package Hunter.\n\n## Beyond registry investigation\n\n### Dependencies of our Ruby codebase\n\nReviewing our registries wasn't enough. We have an important list of Ruby projects (about 300), and verifying if we were impacted was relatively easy. Thanks to a tool developed by my teammate and senior Security engineer, [Michael Henriksen](/company/team/#mhenriksen), I was able to quickly grab the Gemfiles to check and extract the source to make sure we are using the official [https://rubygems.org](https://rubygems.org). Our investigation indicates this was the case.\n\n### Verifying and updating NPM\n\nJavaScript is the second most frequently used programming language, so we needed to be sure that all our packages (around 160) were present on npmjs.org. This investigation showed us one package was not present: `@conventionalcomments/cc-parse`, a package that was developed by a previous team member. While we do use it internally, we had no reason to keep it only on gitlab.com. To ensure this didn't become an issue in the future we decided to [publish the package](https://www.npmjs.com/package/cc-parse) on npmjs.org.\n\n### Referencing Go\n\nDue to the way Go modules work, confusion attacks are not possible. Other types of attacks are possible, however, and I recommend reading [Michael Henriksen](/company/team/#mhenriksen)' blog post the summarizes his research, [\"Finding Evil Go Packages\"](https://michenriksen.com/blog/finding-evil-go-packages/).\n\nReferencing Go packages is very simple: You just need to provide the package URL such as `import \"github.com/stretchr/testify\"` and that's it. Any URL can be provided, which makes evaluating legitimate Go packages difficult. 
Nevertheless, we're looking at how we can close the gap and better protect customers using Go packages.\n\n## How do we avoid confusion attacks?\n\nCurrently only the npm package registry supports forwarding requests to npmjs.org when nothing is found on gitlab.com, this is an option which is [enabled by default](https://docs.gitlab.com/ee/administration/settings/continuous_integration.html#npm-forwarding) for self-managed users and currently enabled on our SaaS offering. Implementation of new package registries will make sure we always check first on GitLab prior to searching in public official registries.\n\n### Control the chaos\n\nWe recently published a blog post around how [GitLab helps protect against supply chain attacks](/blog/devops-platform-supply-chain-attacks/), including ways that customers can combine our powerful DevSecOps platform with a holistic security program to quickly gain control and visibility of their software supply chain.\n\nIn 2021, our plan is to introduce a new product category aptly called the Dependency Firewall. We believe that this planned set of features will help users prevent suspicious dependencies from being downloaded. As it stands today, the anticipated new product would include the ability to:\n\n* Verify package integrity from one single place. Users will be able to see what has been changed and test those packages for security vulnerabilities.\n* Filter the available upstream packages to include only approved, allow-listed packages.\n* Delay updates from packages that have been recently updated under suspicious circumstances. 
For example, users will be able to delay any packages in which the following circumstances have occurred:\n     * Author change\n     * Author information change\n     * Programming language change\n     * Activity after a long period of inactivity\n     * Large code changes\n     * [Introduction of an executable](https://blog.reversinglabs.com/blog/mining-for-malicious-ruby-gems)\n     * [Executable files with a non-executable extension like .png](https://blog.reversinglabs.com/blog/mining-for-malicious-ruby-gems)\n     * Name very similar to a popular package (typosquatting)\n* Audit and mirror every dependency to ensure users are running and requiring developers to take an active, documented role in vetting external dependencies.\n\nThanks to [Tim Rizzi](/company/team/#trizzi) for their contributions to this section.\n{: .note.text-center}\n\nSupply chain attacks are ongoing and increasing. So too then must be the work, vigilance and research of our security teams. We'll continue sharing information about the ways we're making our product stronger and more secure, but if you've got a specific question or topic area that you'd like to hear from us about, leave us a comment or get in touch with me on Twitter [@muffinbox33](https://twitter.com/Muffinbox33).\n\nCover image by [Gabriel Sollmann](https://unsplash.com/@gabons?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/storage?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[720,9],{"slug":1720,"featured":6,"template":680},"deep-dive-investigation-of-gitlab-packages","content:en-us:blog:deep-dive-investigation-of-gitlab-packages.yml","Deep Dive Investigation Of Gitlab 
Packages","en-us/blog/deep-dive-investigation-of-gitlab-packages.yml","en-us/blog/deep-dive-investigation-of-gitlab-packages",{"_path":1726,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1727,"content":1733,"config":1739,"_id":1741,"_type":14,"title":1742,"_source":16,"_file":1743,"_stem":1744,"_extension":19},"/en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql",{"title":1728,"description":1729,"ogTitle":1728,"ogDescription":1729,"noIndex":6,"ogImage":1730,"ogUrl":1731,"ogSiteName":667,"ogType":668,"canonicalUrls":1731,"schema":1732},"How we used delayed replication for disaster recovery with PostgreSQL","Replication is no backup. Or is it? Let's take a look at delayed replication and how we used it to recover from accidental label deletion.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683349/Blog/Hero%20Images/mathew-schwartz-397471-unsplash.jpg","https://about.gitlab.com/blog/delayed-replication-for-disaster-recovery-with-postgresql","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used delayed replication for disaster recovery with PostgreSQL\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andreas Brandl\"}],\n        \"datePublished\": \"2019-02-13\",\n      }",{"title":1728,"description":1729,"authors":1734,"heroImage":1730,"date":1736,"body":1737,"category":743,"tags":1738},[1735],"Andreas Brandl","2019-02-13","\nThe [infrastructure team](/handbook/engineering/infrastructure/) at GitLab is responsible for the operation of [GitLab.com](https://gitlab.com/), the largest GitLab instance in existence: With about 3 million users and nearly 7 million projects, it is one of the largest single-tenancy, open source SaaS sites on the internet. 
The PostgreSQL database system is a critical part of the infrastructure that powers GitLab.com and we employ various strategies to provide resiliency against all kinds of data-loss-inducing disasters. Those are highly unlikely of course, but we are well prepared with backup and replication mechanisms to recover from these scenarios.\n\nIt's a misconception to think of replication as a means to back up a database ([see below](#summing-up)). However, in this post, we're going to explore the power of delayed replication to recover data after an accidental deletion: On [GitLab.com](https://gitlab.com), a user [deleted a label](https://gitlab.com/gitlab-com/gl-infra/production/issues/509) for the [`gitlab-ce`](https://gitlab.com/gitlab-org/gitlab-ce/) project, thereby also losing the label's association with merge requests and issues.\n\nWith a delayed replica in place, we were able to recover and restore that data in under 90 minutes. We'll look into that process and how delayed replication helped to achieve this.\n\n### Point-in-time recovery with PostgreSQL\n\nPostgreSQL comes with a built-in feature to recover the state of a database to a certain point in time. This is called *[Point-in-Time Recovery](https://www.postgresql.org/docs/current/continuous-archiving.html)* (PITR), which leverages the same mechanics that are used to keep a replica up to date: Starting from a consistent snapshot of the whole database cluster (a *basebackup*), we apply the sequence of changes to the database state until a certain point in time has been reached.\n\nIn order to use this feature for a cold backup, we regularly take a basebackup of the database and store this in the *archive* (at GitLab, we keep the archive in [Google Cloud Storage](https://cloud.google.com/storage/)). Additionally, we keep track of changes to the database state by archiving the [*write-ahead log*](https://www.postgresql.org/docs/current/wal-intro.html) (WAL). 
With that in place, we can perform PITR to recover from a disaster: Start with a snapshot that was taken before the disaster happened and apply changes from the WAL archive until right before the disastrous event.\n\n### What is delayed replication?\n\n*Delayed replication* is the idea of applying time-delayed changes from the WAL. That is, a transaction that is committed at physical time `X` is only going to be visible on a replica with delay `d` at time `X + d`.\n\nFor PostgreSQL, there are two ways of setting up a physical replica of the database: *Archive recovery* and *streaming replication*. [Archive recovery](https://www.postgresql.org/docs/11/archive-recovery-settings.html) essentially works like PITR but in a continuous way: We keep retrieving changes from the WAL archive and apply them to the replica state in a continuous fashion. On the other hand, [streaming replication](https://wiki.postgresql.org/wiki/Streaming_Replication) directly retrieves the WAL stream from an upstream database host. We prefer archive recovery for delayed replication because it is simpler to manage and delivers an adequate level of performance to keep up with the production cluster.\n\n### How to set up delayed archive recovery\n\nConfiguration of [recovery options](https://www.postgresql.org/docs/11/recovery-config.html) mostly go into `recovery.conf`. Here's an example:\n\n```\nstandby_mode = 'on'\nrestore_command = '/usr/bin/envdir /etc/wal-e.d/env /opt/wal-e/bin/wal-e wal-fetch -p 4 \"%f\" \"%p\"'\nrecovery_min_apply_delay = '8h'\nrecovery_target_timeline = 'latest'\n```\n\nWith these settings in place, we have configured a delayed replica with archive recovery. It uses [wal-e](https://github.com/wal-e/wal-e) to retrieve WAL segments (`restore_command`) from the archive and delays application of changes by eight hours (`recovery_min_apply_delay`). The replica is going to follow any timeline switches present in the archive, e.g. 
caused by a failover in the cluster (`recovery_target_timeline`).\n\nIt is possible to configure streaming replication with a delay using `recovery_min_apply_delay`. However, there are a few pitfalls regarding replication slots, hot standby feedback, and others that one needs to be aware of. In our case, we avoid them by replicating from the WAL archive instead of using streaming replication.\n\nIt is worth noting that `recovery_min_apply_delay` was only introduced in PostgreSQL 9.4. However, in previous versions, a delayed replica is typically implemented with a combination of [recovery management functions](https://www.postgresql.org/docs/9.3/functions-admin.html) (`pg_xlog_replay_pause(), pg_xlog_replay_resume()`) or by withholding WAL segments from the archive for the duration of the delay.\n\n### How does PostgreSQL implement it?\n\nIt is particularly interesting to see how PostgreSQL implements delayed recovery. So let's look at [`recoveryApplyDelay(XlogReaderState)`](https://gitlab.com/postgres/postgres/blob/c24dcd0cfd949bdf245814c4c2b3df828ee7db36/src/backend/access/transam/xlog.c#L6124) below. It is called from the [main redo apply loop](https://gitlab.com/postgres/postgres/blob/c24dcd0cfd949bdf245814c4c2b3df828ee7db36/src/backend/access/transam/xlog.c#L7196) for each record read from WAL.\n\n```c\nstatic bool\nrecoveryApplyDelay(XLogReaderState *record)\n{\n\tuint8\t\txact_info;\n\tTimestampTz xtime;\n\tlong\t\tsecs;\n\tint\t\t\tmicrosecs;\n\n\t/* nothing to do if no delay configured */\n\tif (recovery_min_apply_delay \u003C= 0)\n\t\treturn false;\n\n\t/* no delay is applied on a database not yet consistent */\n\tif (!reachedConsistency)\n\t\treturn false;\n\n\t/*\n\t * Is it a COMMIT record?\n\t *\n\t * We deliberately choose not to delay aborts since they have no effect on\n\t * MVCC. 
We already allow replay of records that don't have a timestamp,\n\t * so there is already opportunity for issues caused by early conflicts on\n\t * standbys.\n\t */\n\tif (XLogRecGetRmid(record) != RM_XACT_ID)\n\t\treturn false;\n\n\txact_info = XLogRecGetInfo(record) & XLOG_XACT_OPMASK;\n\n\tif (xact_info != XLOG_XACT_COMMIT &&\n\t\txact_info != XLOG_XACT_COMMIT_PREPARED)\n\t\treturn false;\n\n\tif (!getRecordTimestamp(record, &xtime))\n\t\treturn false;\n\n\trecoveryDelayUntilTime =\n\t\tTimestampTzPlusMilliseconds(xtime, recovery_min_apply_delay);\n\n\t/*\n\t * Exit without arming the latch if it's already past time to apply this\n\t * record\n\t */\n\tTimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime,\n\t\t\t\t\t\t&secs, &microsecs);\n\tif (secs \u003C= 0 && microsecs \u003C= 0)\n\t\treturn false;\n\n\twhile (true)\n\t{\n        // Shortened:\n        // Use WaitLatch until we reached recoveryDelayUntilTime\n        // and then\n        break;\n\t}\n\treturn true;\n}\n```\n\nThe takeaway here is that the delay is based on the physical time that was recorded with the commit timestamp of the transaction (`xtime`). We can also see that the delay is only applied to commit records but not to other types of records: Any data changes are directly applied but the corresponding commit is delayed, so these changes only become visible after the configured delay.\n\n### How to use a delayed replica to recover data\n\nLet's say we have a production database cluster and a replica with eight hours of delay. How do we use this to recover data? 
Let's look at how this worked in the case of the [accidental label deletion](https://gitlab.com/gitlab-com/gl-infra/production/issues/509).\n\nAs soon as we were aware of the incident, we [paused archive recovery](https://www.postgresql.org/docs/9.3/functions-admin.html) on the delayed replica:\n\n```sql\nSELECT pg_xlog_replay_pause();\n```\n\nPausing the replica eliminated the risk of the replica replaying the `DELETE` query. This is useful if you need more time to investigate.\n\nThe recovery approach is to let the delayed replica catch up until right before the point the `DELETE` query occurred. In our case we knew roughly the physical time of the `DELETE` query. We removed `recovery_min_apply_delay` and added `recovery_target_time` to `recovery.conf`. This effectively lets the replica catch up as fast as possible (no delay) until a certain point in time:\n\n```\nrecovery_target_time = '2018-10-12 09:25:00+00'\n```\n\nWhen operating with physical timestamps, it's worth adding a little margin for error. Obviously, the bigger the margin, the bigger the data loss. On the other hand, if the replica recovers beyond the actual incident timestamp it also replays the `DELETE` query and we would have to start over (or worse: use a cold backup to perform PITR).\n\nAfter restarting the delayed Postgres instance, we saw a lot of WAL segments being replayed until the target transaction time was reached. In order to get a sense of the progress during this phase, we can use this query:\n\n```sql\nSELECT\n  -- current location in WAL\n  pg_last_xlog_replay_location(),\n  -- current transaction timestamp (state of the replica)\n  pg_last_xact_replay_timestamp(),\n  -- current physical time\n  now(),\n  -- the amount of time still to be applied until recovery_target_time has been reached\n  '2018-10-12 09:25:00+00'::timestamptz - pg_last_xact_replay_timestamp() as delay;\n```\n\nWe know recovery is complete when the replay timestamp does not change any more. 
We can consider setting a [`recovery_target_action`](https://www.postgresql.org/docs/11/recovery-target-settings.html) in order to shut down, promote or pause the instance once replay has completed (the default is to pause).\n\nThe database is now in the state preceding the disastrous query. We can start to export data or otherwise make use of the database. In our case, we exported information about the label that was deleted and its association with issues and merge requests and imported that data into our production database. In other cases with more severe data loss, it can be favorable to promote the replica and continue to use it as a primary. However this means that we lose any data that was entered into the database after the point in time we recovered to.\n\nA more precise alternative to using physical timestamps for targeted recovery is using transaction ids. It is good practice to log transaction ids for e.g. DDL statements (like `DROP TABLE`) using `log_statements = 'ddl'`. If we had a transaction id at hand, we could have used `recovery_target_xid` instead in order to replay to the transaction that preceded the `DELETE` query.\n\nFor the delayed replica, the way back to normal is simple: Revert changes to `recovery.conf` and restart Postgres. After a while, the replica is going to show a delay of eight hours again – ready for any future disasters.\n\n### Benefits for recovery\n\nThe major benefit from a delayed replica over using a cold backup is that it eliminates the step of restoring a full snapshot from the archive. This can easily take hours, depending on network and storage speeds. In our case, it takes roughly five hours to retrieve the full ~2TB basebackup from the archive. In addition to that, we would have to apply 24 hours' worth of WAL in order to recover to the desired state (in the worst case).\n\nWith a delayed replica in place, we get two benefits over a cold backup:\n\n1. No need to retrieve a full basebackup from the archive and\n2. 
we have a *fixed* window of eight hours' worth of WAL that needs to be replayed to catch up.\n\nIn addition to that, we continuously test our ability to perform PITR from the WAL archive and would quickly realize WAL archive corruption or other WAL-related problems by monitoring the lag of the delayed replica.\n\nIn our example case, completing recovery took 50 minutes and translated to a recovery rate of 110 GB worth of WAL per hour (the archive was still on [AWS S3](https://aws.amazon.com/s3/) at that time). The incident was mitigated and data recovered and restored 90 minutes after work was started.\n\n### Summing up: Where delayed replication can be useful (and where it's not)\n\nDelayed replication can be used as a first resort to recover from accidental data loss and lends itself perfectly to situations where the loss-inducing event is noticed within the configured delay.\n\nLet's be clear though: *Replication is not a backup mechanism*.\n\nBackup and replication are two mechanisms with distinct purposes: A *cold backup* is useful to recover from a disaster, for example an accidental `DELETE` or `DROP TABLE` event. In this case, we utilize a backup from cold storage to restore an earlier state of a table or the whole database. On the other hand, a `DROP TABLE` query replicates nearly instantly to all replicas in a running cluster – hence normal replication on its own is not useful to recover from this scenario. Instead, the purpose of *replication* is mostly to guard database availability against failures of individual database servers and to distribute load.\n\nEven with a delayed replica in place, there are situations where we really want a cold backup that is stored in a safe place: data center failures, silent corruption, or other events that aren't visible right away, are prime candidates to rely on cold backups. 
With replication only, we'd be out of luck.\n\nNote: For [GitLab.com](https://gitlab.com/), we currently only provide system-level resiliency against data loss and do not provide user-level data recovery in general.\n\nPhoto by [Mathew Schwartz](https://unsplash.com/photos/sb7RUrRMaC4?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,745],{"slug":1740,"featured":6,"template":680},"delayed-replication-for-disaster-recovery-with-postgresql","content:en-us:blog:delayed-replication-for-disaster-recovery-with-postgresql.yml","Delayed Replication For Disaster Recovery With Postgresql","en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql.yml","en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql",{"_path":1746,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1747,"content":1753,"config":1760,"_id":1762,"_type":14,"title":1763,"_source":16,"_file":1764,"_stem":1765,"_extension":19},"/en-us/blog/demystifying-ci-cd-variables",{"title":1748,"description":1749,"ogTitle":1748,"ogDescription":1749,"noIndex":6,"ogImage":1750,"ogUrl":1751,"ogSiteName":667,"ogType":668,"canonicalUrls":1751,"schema":1752},"GitLab environment variables demystified","CI/CD variables are useful (and flexible) tools to control jobs and pipelines. 
We unpack everything you need to know about GitLab environment variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664679/Blog/Hero%20Images/blog-image-template-1800x945__24_.png","https://about.gitlab.com/blog/demystifying-ci-cd-variables","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab environment variables demystified\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2021-04-09\",\n      }",{"title":1748,"description":1749,"authors":1754,"heroImage":1750,"date":1756,"body":1757,"category":743,"tags":1758,"updatedDate":1759},[1755],"Veethika Mishra","2021-04-09","There is a lot of flexibility when it comes to defining and using variables for [CI/CD](https://about.gitlab.com/topics/ci-cd/). Variables are extremely useful for controlling jobs and pipelines, and they help you avoid hard-coding values in your `.gitlab-ci.yml` configuration file. The information in this post should weave a larger picture by bringing together all (or most) of the information around defining and handling variables, making it easier to understand the scope and capabilities. Relevant documentation is linked throughout the post.\n\nIn [GitLab CI/CD](https://docs.gitlab.com/ee/ci/), variables can be used to customize jobs by defining and storing values. When using variables there is no need to hard code values. In GitLab, CI/CD variables can be defined by going to **Settings >> CI/CD >> Variables**, or by simply defining them in the `.gitlab-ci.yml` file.\n\nVariables are useful for configuring third-party services for different deployment environments, such as `testing`, `staging`, `production`, etc. Modify the services attached to those environments by simply changing the variable that points to the API endpoint the services need to use. 
Also use variables to configure jobs and then make them available as environment variables within the jobs when they run.\n\n![GitLab reads the .gitlab-ci.yml file to scan the referenced variable and sends the information to the GitLab Runner. The variables are exposed on and output by the runner.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_processing.jpeg)\n\n## The relationship between variables and environments\n\nSoftware development as a process includes stages to test a product before rolling it out to users. [Environments](https://docs.gitlab.com/ee/ci/environments/) are used to define what those stages look like and it may differ between teams and organizations.\n\nOn the other hand, variables are data values that are likely to change as a result of user interaction with a product. For example, their age, preference, or any input you could possibly think of that might determine their next step in the product task-flow.\n\nWe often hear the term [environment variable](https://docs.gitlab.com/ee/administration/environment_variables.html). These are variables that are defined in a given environment, but outside the application. GitLab CI/CD variables provide developers with the ability to configure values in their code. Using variables is helpful because it ensures that the code is flexible. GitLab CI/CD variables allow users to modify an application deployed to a certain environment without making any change to code. 
It is simple to run tests or even integrate third-party services by changing a configuration environment variable outside the application.\n\n## The scope of variables for CI/CD\n\n![Order of precedence for CI/CD variables: 1) Manual pipeline run, trigger and schedule pipeline variables, 2) Project level, group level, instance level protected variables, 3) Inherited CI/CD variables, 4) Job level, global yml defined variables, 5) Deployment variables, 6) Pre-defined CI/CD variables](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_precedence.jpeg)\n\n### `.gitlab-ci.yml` defined variables\n\nVariables that need to be available in the job environment can be added to GitLab. These CI/CD variables are meant to store non-sensitive project configuration, like the database URL in the `.gitlab-ci.yml` file. Reuse this variable in multiple jobs or scripts, wherever the value is needed. If the value changes, you only need to update the variable once, and the change is reflected everywhere the variable is used.\n\n### Project CI/CD variables\n\nMoving a step above the repository-specific requirements, you can define CI/CD variables in [project settings](https://docs.gitlab.com/ee/ci/variables/#for-a-project), which makes them available to CI/CD pipelines. These are stored out of the repository (not in the `.gitlab-ci.yml` file), but are still available to use in the CI/CD configuration and scripts. Storing the variables outside the `.gitlab-ci.yml` file keeps these values limited to a project-only scope, and not saved in plain text in the project.\n\n### Group and instance CI/CD variables\n\nSome variables are relevant at the group level, or even instance level, and could be useful to all projects in a group or instance. 
Define the variables in the [group or instance settings](https://docs.gitlab.com/ee/ci/variables/#for-a-group) so all projects within those scopes can use the variables without actually needing to know the value  or having to create the variables for the lower scope. For example, a common value that needs to be updated in multiple projects can be easily managed if it stays up-to-date in a single place. Alternatively, multiple projects could use a specific password without actually needing to know the value of the password itself.\n\n## Jobs and pipelines as environments\n\nGitLab CI/CD variables, besides being used as environment variables, also work in the scope of the `.gitlab-ci.yml` configuration file to configure pipeline behavior, unrelated to any environment. The variables can be stored in the project/group/instance settings and be made available to jobs in pipelines.\n\nFor example:\n\n```  \njob:  \n  rules:  \n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH  \n  script:  \n  - echo \"This job ran on the $CI_COMMIT_BRANCH branch.\"  \n```\n\nThe variable `($CI_COMMIT_BRANCH)` in the script section runs in the scope of the job in which it was defined. This scope is the \"job environment\" – meaning, when the job starts, the GitLab runner starts up a Docker container and runs the job in that environment. The runner will make that variable (and all other predefined or custom variables) available to the job, and it can display their value in the log output if needed.\n\nBut the variable is **also** used in the `if:` section to determine when the job should run. That in itself is not an environment, which is why we call these CI/CD variables. They can be used to dynamically configure your CI/CD jobs, **as well** as be used as environment variables when the job is running.\n\n## Predefined variables\n\nA number of variables are [predefined](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) when a GitLab CI/CD pipeline starts. 
A user can immediately access values for things like commit, project, or pipeline details without needing to define the variables themselves.\n\n## Custom CI/CD variables\n\n![Runners can create two kinds of custom CI/CD variables: Type and File.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variable_types.jpeg)\n\nWhen creating a CI/CD variable in the settings, GitLab gives the user more configuration options for the variable. Use these extra configuration options for stricter control over more sensitive variables:\n\n**Environment scope:** If a variable only ever needs to be used in one specific environment, set it to only ever be available in that environment. For example, you can set a deploy token to only be available in the `production` environment.\n\n**Protected variables:** Similar to the environment scope, you can set a variable to be available only when the pipeline runs on a protected branch, like your default branch.\n\n**Variable type:** A few applications require configuration to be passed to it in the form of a file. If a user has an application that requires this configuration, just set the type of variable as a \"File\". Configuring the CI/CD variable this way means that when the runner makes the variable available in the environment, it actually writes it out to a temporary file, and stores the path to the file as the value. Next, a user can pass the path to the file to any applications that need it.\n\nAlong with the listed ways of defining and using variables, GitLab introduced a feature that generates pre-filled variables when there's a need to run a pipeline manually. Prefilled variables reduce the chances of running into an error and makes running the pipeline easier.\n\n**Masked variables:** [Masked variables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable) are CI variables that have been **hidden in job logs** to prevent the variable’s value from being displayed. 
\n\n**Masked and hidden variables:** Introduced in [GitLab 17.4](https://about.gitlab.com/releases/2024/09/19/gitlab-17-4-released/#hide-cicd-variable-values-in-the-ui), [Masked and hidden](https://docs.gitlab.com/ee/ci/variables/#hide-a-cicd-variable) variables provide the same masking feature from job logs and **keep the value hidden** **in the Settings UI**. We do not recommend using either of these variables for sensitive data (e.g. secrets) as they can be inadvertently exposed. \n\n## Secrets\n\nA secret is a sensitive credential that should be kept confidential. Examples of a secret include:\n\n* Passwords  \n* SSH keys  \n* Access tokens  \n* Any other types of credentials where exposure would be harmful to an organization\n\nGitLab currently enables its users to [use external secrets in CI](https://docs.gitlab.com/ee/ci/secrets/), by leveraging HashiCorp Vault, Google Cloud Secret Manager, and Azure Key Vault to securely manage keys, tokens, and other secrets at the project level. This allows users to separate these secrets from other CI/CD variables for security reasons.\n\n### GitLab Secrets Manager\n\nBesides providing support for external secrets in CI, GitLab is also working on introducing a [native solution to secrets management](https://gitlab.com/groups/gitlab-org/-/epics/10108) to securely and conveniently store secrets within GitLab. This solution will also help customers use the stored secrets in GitLab specific components and environments, and easily manage access at namespace groups and projects level. \n\n## Read more\n* [GitLab native secrets manager to give software supply chain security a boost](https://about.gitlab.com/blog/gitlab-native-secrets-manager-to-give-software-supply-chain-security-a-boost/)\n\n***Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. 
Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.*\n",[1293,677,9,1090,109,1294],"2025-01-13",{"slug":1761,"featured":6,"template":680},"demystifying-ci-cd-variables","content:en-us:blog:demystifying-ci-cd-variables.yml","Demystifying Ci Cd Variables","en-us/blog/demystifying-ci-cd-variables.yml","en-us/blog/demystifying-ci-cd-variables",{"_path":1767,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1768,"content":1774,"config":1780,"_id":1782,"_type":14,"title":1783,"_source":16,"_file":1784,"_stem":1785,"_extension":19},"/en-us/blog/designing-alerts-and-incidents",{"title":1769,"description":1770,"ogTitle":1769,"ogDescription":1770,"noIndex":6,"ogImage":1771,"ogUrl":1772,"ogSiteName":667,"ogType":668,"canonicalUrls":1772,"schema":1773},"Designing an incident management workflow from scratch and where its used","See here how to create a single workflow for triaging alerts and resolving incidents using GitLab's Product Development Flow","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670750/Blog/Hero%20Images/designing-incidents-alerts.jpg","https://about.gitlab.com/blog/designing-alerts-and-incidents","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Designing an incident management workflow from scratch and where its used\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amelia Bauerly\"}],\n        \"datePublished\": \"2020-11-03\",\n      }",{"title":1769,"description":1770,"authors":1775,"heroImage":1771,"date":1777,"body":1778,"category":698,"tags":1779},[1776],"Amelia Bauerly","2020-11-03","\n{::options parse_block_html=\"true\" /}\n\n\n\nMany companies stitch together multiple tools to handle alerts 
and incidents, which can be time-consuming and frustrating. Why should teams have to use so many tools for what is, essentially, a single workflow?\n\nWe hear you, and we think we’ve come up with some great new features to help alleviate this problem. \n\nAt GitLab, the Monitor team has been busily working behind the scenes to improve our offerings for [Alerts](https://about.gitlab.com/releases/2020/06/22/gitlab-13-1-released/#manage-it-alerts-in-gitlab) and [Incident Management](https://about.gitlab.com/releases/2020/08/22/gitlab-13-3-released/#create-and-manage-it-incidents-in-gitlab). \n\n## What’s changed? \n\nYou can now send alerts from your monitoring tools straight to GitLab, where they will be displayed for you and your team to review. If an alert is serious enough, you can escalate that alert to an incident, a newly defined type of issue crafted specifically for this purpose. Once the incident is created, you can push the fixes immediately: all within a single tool.\n\nWe’re incredibly proud of what we’re creating but, how did we get here? How did we take what was a blank space and turn it into something that people could use? Dare I say, might even _want_ to use? \n\nThe short answer: by working through GitLab’s [Product Development Flow](https://about.gitlab.com/handbook/product-development-flow/), by leaning on our value of [iteration](https://handbook.gitlab.com/handbook/values/#iteration), and collaborating closely with the people who use GitLab every day.\n\n## Validating the problem\n\nThe first thing we needed to do was to ensure we understood what people were struggling with, in their current workflow, with their current tools. 
We call this [Problem Validation](https://about.gitlab.com/handbook/product-development-flow/#validation-phase-2-problem-validation), and it means asking the following question before getting started with any work: Do we clearly understand the problem(s) end-users have?\n\nAs part of the problem validation process, we reached out to Developers, SREs, and [DevOps](/topics/devops/) engineers. We wanted to better understand what tools they were using, what their current workflows were, and if there were any gaps in their workflows that we could fill within GitLab.\n\nThrough our research, we discovered something that was both a serious pain point  _and_  an opportunity for us at GitLab. Unsurprisingly, it turns out that many Developers are currently stitching together a multitude of tools for monitoring their applications, for creating and sending alerts, and for investigating and resolving the issues that are reported. \n\nStitching together all of these tools can work, but it’s messy for teams to manage. The context switching that’s required is difficult, and it means having to keep track of different pieces of information in multiple places. We heard, again and again, how burdensome and fatiguing this can be. What people need, instead, is an intentionally designed workflow for triaging alerts and responding to incidents. \n\nLuckily for us, GitLab already had many pieces of this workflow in place, in that Developers can currently raise issues, create merge requests, and deploy their code within our product. The opportunity, and what we were missing, was a place to review and triage alerts. 
\n\nIf we could introduce a single location where all alerts (from multiple tools) can be received, reviewed, resolved, or escalated into incidents, we could create a seamless incident management workflow within GitLab: from the alert being received to the incident being created, all the way through to the code fix being deployed.\n\n## Validating the solution\n\nWith the desired workflow pinned down, we started ideating on designs for triaging and managing alerts. After creating some initial concepts, we wanted to validate them to make sure we were actually solving the problems we had identified. \n\nFollowing our Product Development Flow for [solution validation](https://about.gitlab.com/handbook/product-development-flow/#validation-phase-4-solution-validation), we wanted to share our designs with the teams we thought would most benefit from using the features we designed. \n\nTo enable us to more quickly connect with the people who would be using our features, we decided to create a Special Interest Group (SIG). We went this route because we wanted to work more collaboratively with a well-defined group of people over a period of time. We felt that this could help us to understand their needs better, and it would mean we could check in with them more often, and on a more regular basis. \n\nThe SIG is composed of GitLab customers who are involved with responding to alerts and incidents within their organizations. To recruit this group, we sent out a survey to our [First Look](https://about.gitlab.com/community/gitlab-first-look/) members. When we had a short list of people who fit our criteria, we scheduled introductory meetings to learn more about them, find out how they worked, and explain a bit more about the SIG. 
After ensuring they were on board with our experiment, we invited them to join our SIG.\n\nAs we generated designs – first for an alert list, then for an alert detail page – we shared these designs with the SIG members during live, individual feedback sessions. During these sessions, we asked them to take a short usability test where we gave them an imagined scenario and asked them to complete a task. We also asked them for their feedback more generally, to understand if what we were proposing would help improve their current workflow. \n\nWe met with the SIG monthly over a period of several months. Each time they reviewed our designs, we revised them. The feature set we ultimately arrived at owes a great deal to their feedback and their commitment to improving GitLab as a product. \n\n![Alert list in GitLab](https://about.gitlab.com/images/blogimages/Alert-list-page.png){: .shadow.medium.center}\n\nAlert list in GitLab\n{: .note.text-center}\n\nAfter validating our proposals with the SIG members we broke our designs down into what we call a [Minimal Viable Change](https://about.gitlab.com/handbook/product/product-principles/#the-minimal-viable-change-mvc) (MVC) that, over the course of several months, our engineering team built into GitLab: starting with the alert list, and then adding in the alert detail page. Additional functionality, such as the ability to change the status of an alert from within GitLab, was built on top of those two base elements.\n\n## Introducing dedicated incidents\n\nMany alerts that are sent are not necessarily things that teams need to worry about. Maybe they are expected issues, or maybe they aren't things that need to be immediately addressed. But, what happens when an alert is serious enough to require additional investigation? 
What happens when the alert needs to become an incident?\n\nFor the [MVC](https://about.gitlab.com/handbook/product/ux/product-designer/#refine-mvc) version of alert management, alerts would be received and, if they were serious enough, the alert could be escalated to a GitLab issue. \n\nWe used our existing issue framework for incidents because it was an easy way for us to complete the larger workflow. From a GitLab issue, people can create an MR to fix whatever is causing the alert in the first place. Then they can push the code that will publish the fix. So, by using issues, we were able to approximate a full incident management workflow: from alert receipt to live code.\n\nHowever, in testing the alerting functionality we had built with our SIG, we learned that there were still many gaps in the experience of investigating and resolving incidents that issues couldn’t really help us fix. \n\nFor example, within incidents, you likely need quick access to various metrics or runbooks. Maybe you also need an incident timeline. There are hacky ways of making GitLab issues work for these purposes, but we wondered, \"Is there a way that we can better surface the information needed for quickly resolving incidents within issues?\"\n\nThese sorts of discussions ultimately resulted in us introducing dedicated incidents in GitLab. Incidents are a special kind of issue where the content displayed is updated to better fit the needs of people actively involved in investigating and resolving incidents. \n\nIn designing incidents, we removed items from our existing issues that were less relevant, replacing them with content that better fits the incident workflow. In both cases, we used feedback from customers to make decisions about what to include – and what to exclude. 
Our goal was to make sure that only the most relevant information is visible, so that incidents can be resolved as quickly and efficiently as possible.\n\n![An example dedicated incident in GitLab](https://about.gitlab.com/images/blogimages/dedicated-incident.png){: .shadow.medium.center}\n\nAn example dedicated incident in GitLab\n{: .note.text-center}\n\nCreating a dedicated type of issue for incidents hasn’t been a quick process! By relying on our iteration value, we’ve been slowly transforming the GitLab issue into an incident over the course of many months. Now, incidents are finally taking shape, and we are at the point where people have started using them as part of their workflows. **We’ve seen an increase in usage of 4,200%!**\n\nThis is a huge step for incident management at GitLab, and we’re delighted to see how people will use incidents at their organizations.\n\n## Up next: On-call Management\n\nThe final piece of incident management is on-call management: How does your team know that incidents are happening and need to be addressed? \n\nTo tackle this next batch of work, we’re going back to our Product Development Flow’s [Problem Validation](https://about.gitlab.com/handbook/product-development-flow/#validation-phase-2-problem-validation) step. We’re talking to people to ensure we understand their needs. Then we’ll start to think about designs for on-call schedules, escalation policies, and paging. We intend to build and release these features early next year. \n\nOnce these features are introduced, we will have enabled the end-to-end workflow for [incident management within GitLab](https://about.gitlab.com/direction/service_management/incident_management/), from triggered alerts through post-incident review. After that point, we’ll investigate how people are experiencing the features we’ve built and how we can further improve them. \n\nWe look forward to hearing your feedback, so we can continue to make incident management in GitLab even better. 
What do you want to see us build next? Leave a comment on the [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/271410) if you have any suggestions. Additionally, if you'd like to participate in our customer feedback sessions, consider joining our [First Look](https://about.gitlab.com/community/gitlab-first-look/) panel. We'd love for you to join us!\n\nCover image credit:\n\nCover image by [Kelly Sikkema](https://unsplash.com/@kellysikkema?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n",[700,1698,9],{"slug":1781,"featured":6,"template":680},"designing-alerts-and-incidents","content:en-us:blog:designing-alerts-and-incidents.yml","Designing Alerts And Incidents","en-us/blog/designing-alerts-and-incidents.yml","en-us/blog/designing-alerts-and-incidents",{"_path":1787,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1788,"content":1794,"config":1800,"_id":1802,"_type":14,"title":1803,"_source":16,"_file":1804,"_stem":1805,"_extension":19},"/en-us/blog/dev-strategy-review",{"title":1789,"description":1790,"ogTitle":1789,"ogDescription":1790,"noIndex":6,"ogImage":1791,"ogUrl":1792,"ogSiteName":667,"ogType":668,"canonicalUrls":1792,"schema":1793},"Tell us what you think about our Dev strategy","Take a look at how we're going to help you better manage, plan, and create.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668253/Blog/Hero%20Images/pencil2.jpg","https://about.gitlab.com/blog/dev-strategy-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tell us what you think about our Dev strategy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Pundsack\"}],\n        \"datePublished\": \"2019-12-04\",\n      }",{"title":1789,"description":1790,"authors":1795,"heroImage":1791,"date":1797,"body":1798,"category":299,"tags":1799},[1796],"Mark Pundsack","2019-12-04","\n\nThis is the 
first in a series of posts diving into our strategy and plans for the GitLab product. This post focuses on the [Dev section](/handbook/product/categories/#dev-section) and is an excerpt of our public [direction page for Dev](/direction/dev/), which you can read for more detail. You can also watch our director of product for Dev, [Eric Brinkman](/company/team/#ebrinkman) present the strategy below. When you're done reading (or watching), please give us your feedback via the [survey](#survey) below!\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/mIpHEbyhsj0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n![Dev Overview](https://about.gitlab.com/images/direction/dev/dev-overview.png)\nSee how the GitLab product team plans to advance our Dev strategy over the next 12 months to three years.\n{: .note.text-center}\n\n## Overview of our Dev product\n\nBefore we dive into our vision for the future of Dev at GitLab, we're providing some more context on our product and how it fits into the market.\n\nThe Dev section is made up of the [Manage](/handbook/product/categories/#manage-stage), [Plan](/handbook/product/categories/#plan-stage), and [Create](/handbook/product/categories/#create-stage) stages of the DevOps lifecycle. These stages mark the leftmost side of the DevOps lifecycle and primarily focus on the creation and development of software. The scope for Dev stages is wide and encompasses a number of analyst categories including [value stream management](/solutions/value-stream-management/), project portfolio management, enterprise agile planning tools, source code management, IDEs, design management, and even ITSM. 
It is difficult to truly estimate the total addressable market (TAM) for the Dev section, as our scope includes so many components from various industries, but research indicates the estimated [TAM](https://docs.google.com/spreadsheets/d/1HYi_l8v-wTE5-BUq_U29mm5aWNxnqjv5vltXdT4XllU/edit?usp=sharing) in 2019 is roughly ~$3B, growing to ~$7.5B in 2023 (26.5% CAGR).\n\nBased on [DevOps tool revenue](https://drive.google.com/file/d/1VvnJ5Q5PJzPKZ_oYBHGNuc6D7mtMmIZ_/view) at the end of 2018 and comparing to GitLab annual recurring revenue at the end of FY20 Q3, our estimated market share is approximately 1.5% based on revenue. (Note: this assumes we can attribute 100% of GitLab revenue to Dev stages.) Market share based on source code management is somewhere in the [30%](https://docs.google.com/document/d/15TLEUc9BxiiB9N33MW-7zGvfitfFXQj3TM8BfM2q4hM/edit?usp=sharing) range.\n\nNearly [half of organizations](https://drive.google.com/file/d/17ZSI2hGg3RK168KHktFOiyyzRA93uMbd/view?usp=sharing) still have not adopted DevOps methodologies, despite [data](https://drive.google.com/file/d/17MNecg84AepxWlSDB5HjNBrCJggaS9tP/view?usp=sharing) that indicates far higher revenue growth for organizations that do adopt these strategies. Migrating a code base to a modern, Git-backed source control platform like GitLab can often be the first step in a DevOps transformation. As such, we must provide industry-leading solutions in source code and code review, as this is not only the entry into DevOps for our customers, but typically the entry into the GitLab platform. Once a user has begun using repositories and code review features like merge requests, they often move “left” and “right” to explore and use other capabilities in GitLab, such as CI and project management features.\n\nPer our stage monthly active user data, the GitLab stages with the highest usage are Manage and Create. 
As such, these stages must focus on security fixes, bug fixes, performance improvements, UX improvements, and depth more than other areas of GitLab. Plan, while introduced in 2011 with the release of issue tracking, still falls far behind market leaders who have better experiences for sprint management, portfolio management, roadmapping, and workflows.\n\nOther areas, such as value stream management, are nascent to both GitLab and the market and will require more time devoted to executing problem and solution validation discovery.\n\nOver the next year, Dev will require focus on both breadth and depth activities, and each stage will require significant investment to accelerate the delivery of security issues, performance issues, and direction items.\n\n## Vision themes\n\nOur vision for the Dev section is to provide the world’s best product creation platform. We believe we have a massive opportunity to change how cross-functional, multi-level teams collaborate by providing an experience that breaks down organizational silos and enables better collaboration. We want to deliver a solution that enables higher-quality products to be created faster. The following themes are listed below to surface our view of what will be important to the market and to GitLab over the next three to five years. As such, they will be the cornerstone of our three-year strategy, and all activities in the one-year plan should advance GitLab in one or more of these areas.\n\n### Efficient and automated code review\n\nCode review should be a delightful experience for all involved in the process. Over time, we expect the code review process to evolve from where it is today to become a mostly automated process. Along the way, incremental improvements will occur, where developer platforms like GitLab will focus on performance and usability of the code review tools. Code review should be an efficient process, and the easier GitLab can make code review, the more efficient Dev teams become. 
[Research indicates that better code review should reduce the number of bugs](https://blog.semmle.com/code-review-metrics/) and increase the amount of higher-quality features an organization can ship. The code review process will continue to provide a venue for developers to learn and collaborate together.\n\nFor example, GitLab will:\n\n* Load large, multi-file diffs faster than any other comparable product on the market.\n* Provide tailored insights to the code reviewer, alerting them to the most important areas to review.\n* Allow for client- and server-side evaluation of code where possible, and integrate it into the code review process.\n\n### Measurement and increased efficiency of the value stream\n\nPeter Drucker has said “[If you can’t measure it, you can’t improve it](https://guavabox.com/if-you-cant-measure-it-you-cant-improve-it/)”. Many Dev teams have no way of measuring their efficiency, and even if they do, there is not enough feedback, information, or actionable insights to improve the efficiency of their team. Even then, once efficiency is improved, it can be difficult to tell if a team’s performance is good or bad, as there is often no point of comparison. Even the best performing team in an organization could be worse than the competition. Increasing efficiency is paramount to companies increasing their **time to value** and helping organizations answer **“Is my DevOps transformation working?”**\n\nWe believe efficiency can be improved in two ways. The first way is by improving existing value stream activities and making them more efficient. This focuses on making existing activities as fast as possible. The second way is to question and change the value stream into higher value-added activities at each step. GitLab’s vision is to help answer both of these questions: “Am I doing things fast enough?” and “Am I doing the right things?”\n\nToday, value stream management is largely focused on visualizing the value chain through deployment. 
GitLab is uniquely positioned to also visualize, track, and measure value chain activities to the right of deployment. For example, the value created by post-launch activities, such as press releases, blog posts, and marketing campaigns should funnel into value stream management, while providing the business with the right data and insights for their value chain.\n\nFor example, GitLab will provide:\n\n* Easy-to-use and customizable tools that measure the efficiency of the DevOps lifecycle.\n* Insight into areas of waste where teams can improve.\n* Recommendations based on large data sets of other teams using GitLab, for comparison.\n* A visual experience for value stream management that goes beyond code deployment.\n\n### DevOps for more personas\n\nDevOps started with the merging of Development and Operations and has since been augmented to include Security in some circles, [highlighting DevSecOps as the next trend](/topics/devsecops/). There are many other personas that are involved in software development, such as product managers, project managers, product designers, finance, marketing, procurement, etc. These personas will continue to expand until nearly every role at knowledge-work companies touches some facet of the DevOps lifecycle. 
Over time, organizations will realize that teams who work out of the same platform/set of tools are more efficient and deliver faster business and customer value.\n\nBecause of this trend, each persona of the DevOps lifecycle should ultimately be treated as a first-class citizen in GitLab.\n\nFor example, GitLab will provide:\n\n* A better experience for project management workflows.\n* A space for product designers to design and collaborate on designs with product managers and engineers.\n* A Web IDE experience that is able to run the GDK, serving collaborators of all skill sets and hardware, allowing them to contribute to GitLab.\n\n### Enterprise digital transformation\n\nWhile we will continue to solve for the [modern DevOps use case first](/handbook/product/product-principles/#modern-first), most enterprise customers have custom requirements that GitLab does not solve for today. This is a wide-ranging set of custom controls that spans systems such as permissions, approvals, compliance, governance, workflows, and requirements mapping. It is our belief these needs will exist for many years to come, and we will need to incorporate these to truly become a flexible [DevOps platform](/solutions/devops-platform/) that serves enterprise segments. We will strive to do this in ways that are modern and, where possible, adhere to a [“convention over configuration”](/handbook/product/product-principles/#convention-over-configuration) approach, living with the cognitive dissonance that sometimes flexibility will be required in areas we have not been willing to venture into thus far.\n\nAdditionally, compliance, auditing, and surfacing evidence of security/compliance posture will become more important as more GDPR-like legislation is enacted and passed into law. 
GitLab should make it easy to not only surface and deliver evidence for GitLab controls (i.e., who has access to GitLab, who did what on what group, etc.), but also to track and manage compliance requirements for various legislation our customers may be bound to.\n\nAs examples, GitLab will provide:\n\n* Customizable workflows, unlocking enforcement, approvals, and insight into these workflows.\n* More customizable and fine-grained permissions.\n* Logs for everything that’s done within GitLab and allow those events to be accessible via the API and UI.\n* Alerting on GitLab audit events.\n\n### Project management morphs into product management\n\nProduct managers often struggle with answering the question, \"Is the product or feature I just launched successful?\". There are many sensing mechanisms to help answer this question, including revenue, users, customer feedback, NPS, etc., but there is currently no product that helps product managers exhaustively manage the product development lifecycle from end to end. Many products assist with planning, delivery of code, and deployment, but feedback and iteration are equally as important to product managers as shipping the first iteration. Getting the first iteration out is traditionally celebrated, but is only one of many steps to true product development lifecycle management.\n\nImagine an experience where product managers can log in and view the \"health\" of their entire portfolio on one dashboard. It is clear which features have the most value to customers (and by extension to the business) as measured by key metrics, assisting PMs with prioritization activities. PMs can quickly identify features or products within their portfolio that need more attention and drill into them, identifying the correct next action to take, whether it's iteration on the feature or perhaps sunsetting it. 
PMs can quickly create an issue for the next iteration, use version control features, view security incidents, respond to customer feedback, drill down into analytics, control A/B tests of the feature, and even interact with users of the feature or product directly by creating ad-hoc surveys or questions for users to answer. Additionally, the experience should allow for ROI analysis and tracking of the ROI after capital has been expended.\n\nWithin three years, project management tools will begin evolving to provide this experience and help PMs answer tough product questions. These tools will also assist with measuring and predicting value to the organization, if a specific action is prioritized by the PM. The ideal solution most likely uses data science and predictive analytics to assist product managers with decisions both before and after a feature is launched.\n\nAs examples, GitLab will provide:\n\n* Feature management capabilities, including the ability to manage a feature as an object inside of GitLab that lives on after an issue is closed.\n* An experience where PMs can quickly analyze the health of all relevant features.\n* A framework that helps PMs with prioritization decisions.\n* A framework for ROI analysis and measurement.\n\n## Our three-year strategy\n\nIn three years, the Dev section market will:\n\n* Centralize around Git as the version control of choice for not only code, but for design assets, gaming, silicon designs, and AI/ML models.\n* Have a market leader emerge in the value stream management space. 
Currently, the market is fragmented with most players focused on integrations into various DevOps tools.\n* Adopt a mindset shift from project management to product management.\n* Recognize the value of a single platform for all software creation activities, including product management.\n* See an uptick in startups and applications being built on the backs of a \"no code\" framework\n\nConsidering the evolution of the Dev section market, in three years, GitLab will:\n\n* Provide a next-generation, highly performant Git-backed version control system for large assets, such as ML models. Our goal in three years should be to host the most repositories of these non-code assets.\n* Emerge as the leader in VSM and be recognized in the industry by customers and analysts as such. Our goal in three years should be to provide the best insights into the product development process that no other tool can come close to, as we have a [unified data model](https://www.ca.com/en/blog-itom/what-is-a-unified-data-model-and-why-would-you-use-it.html) due to GitLab being a single platform.\n* Develop an industry-leading product management platform where multiple features and products can be measured and managed easily.\n* Research and potentially add capabilities for \"no code\" workflows.\n\n## Our one-year plan: What’s next for Dev\n\nOver the next 12 months, each stage in the Dev section will play an integral part in this strategy.\n\nPlease see the [categories page](/handbook/product/categories/#dev-section) for a more detailed look at Dev's plan by exploring `Strategy` links in areas of interest.\n\n### Manage\n\n**Enterprise readiness:** GitLab must be seen as a platform that enterprises can use out-of-the-box for both GitLab.com and self-managed deployments. We're doing this by focusing on improvements in several key areas:\n\n  * Enterprise-grade authentication and authorization. 
Critical for large organizations managing users at scale, we're focused on investing in SAML SSO that works across a range of identity providers and automates member management.\n  * Improving tools that help compliance-minded organizations thrive. GitLab makes it easy to contribute, but administrators should have comprehensive and consistent views on instance activity. We'll improve audit management to a lovable category and introduce dashboarding and alerting to help tell your compliance story to stakeholders. We'll also solve a pronounced need for fine-grained member permissions.\n\n**Lowering time to production for our customers:** Improvements to productivity and code analytics over the next 12 months will allow our customers to drill down and identify sources of waste in their existing process. Within 12 months, GitLab customers will be able to answer how much their time-to-production metrics have improved.\n\n**A great import experience:** Few instances start from scratch – for most, one of the earliest tasks for a GitLab administrator is importing information from outside the application. We'll invest heavily in a strong import user experience and build bespoke importers for key competitors like Jenkins and Jira. We'll also expand on the capabilities of our existing importers, with a focus on making GitLab.com migration easy.\n\n### Plan\n\n\n\n**Kanban boards**: Current project management tools are capable, but suffer from usability. Trello made significant gains by focusing on the user experience. Unfortunately, Trello chose to be a general tool which left some software teams wanting features designed specifically to help with software development and delivery. GitLab has an opportunity to re-design Kanban boards for software teams – think of how Jira could work if it were designed by Trello as opposed to the other way around. 
Our boards need to evolve to be a primary interface, a complete WYSIWYG document view where everyone who is looking on board X is seeing the same thing (updated in real time), with rich interaction without having to leave the board. This may include changes such as having short summaries, first-class checklists, quick filters, etc. In addition, boards need to focus on common workflows of software teams such as issue triage, daily workflow, sprint planning, quarterly planning, executive reporting, etc.\n\n**Importing from Jira without losing required data**: In the next 12 months, we will deliver enforced workflows, a better roadmap experience, cumulative flow diagrams, and improvements to boards in order to enable a better planning and project management experience.\n\n**Enhancing portfolio and project roadmaps**: Provide easy-to-use, cross-team roadmaps at the portfolio, project, and epic level that allow users across the organization to see how work is progressing and identify dependencies and blockers. Organize and prioritize work through dynamic roadmaps in real time.\n\n**Easy top-down planning**: Enhanced portfolio management experience allowing customers to start planning from the top: Creating initiatives, projects, and epics while laying them out on a roadmap prior to the creation of issues and milestones. Provide analytics at each level, and allow linking of each object to provide deeper dependency mapping across multiple teams and projects. Enable users to create strategic initiatives and assign work, impact, and resources to each to help them make the right business decisions. 
Additionally, in order for our users to get more value out of Plan, we will be implementing [Epic features to be more aligned with our buying tiers](https://gitlab.com/groups/gitlab-org/-/epics/1887).\n\n**Reporting and analytics**: Provide dashboarding and analytics for project and portfolio management, allowing business to track and communicate progress on work in-flight, capacity of teams and projects, and overall efficiency across their full portfolio.\n\n**Requirements management**: Many regulated customers want to use GitLab for requirements mapping, dependencies, and process management. GitLab will provide these capabilities in a modern-first way.\n\n### Create\n\n**Realtime:** It's time to fully embrace realtime. Many parts of GitLab update in near real time, but not everything does, and unfortunately some of the parts that are left out are critical to a great experience. Realtime kanban boards is mentioned above in Plan, but within Create, there's tons of opportunity for realtime enhancements. Areas we are thinking about are real time editing of code in the Web IDE for live coding and real time editing of issue/MR descriptions and comments.\n\n**Git availability and performance:** Git is a critical component in the deployment process when practicing [Continuous Deployment](https://docs.gitlab.com/ee/ci/introduction/#continuous-deployment). As such, service degradations or outages that prevent access to Git cannot be tolerated. To this end, making [Gitaly highly available](https://gitlab.com/groups/gitlab-org/-/epics/842) is of the utmost importance, and secondarily, improve the handling of extreme read pressures exerted by highly parallelized CI loads that cause performance degradations.\n\n**Enhancing the code review experience:** In the next 12 months, we must focus code review to be more performant and intelligent. 
We will do this by investing in [performance improvements](https://gitlab.com/groups/gitlab-org/-/epics/1417), adding additional code review functionality such as jump to definition, identifying references, displaying function documentation and type signatures, and adding support for first-class reviewers. Code review should be an \"IDE like\" experience.\n\n**Making large files “just work” in Git:** To gather more market share from industries that currently use Perforce or SVN, we must invest in making the large-file experience in Git excellent. It should “just work” without configuration or specialized hardware.\n\n**Investing in our Wiki product:** Many customers currently use Wikis for knowledge bases and project management activities. Our first step in making the GitLab Wiki more competitive is making wikis available at the group level and enhancing markdown support.\n\n**Focusing on the gaps in the design management workflow:** Most designers use a sketch or prototyping tool already, but version controlling assets alongside code and providing a workflow to compare those assets to what front-end teams ship is a gap in the market. We are uniquely poised to capitalize on this gap – think\nvisual review apps checked against the mockups checked into the repository. Additionally, we will continue to make improvements to the collaboration aspect of designs and consider other features such as simple sketch functionality inside of issues and MRs.\n\n**Enabling easier contributions to GitLab:** Contributing to GitLab requires users to set up and run the [GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) locally. This is cumbersome and typically requires multiple hours of debugging with senior engineers. While the process of streamlining the GDK locally should be advanced, GitLab should also provide the GDK as part of the Web IDE experience. 
Allowing contributors to quickly spin-up feature branches should encourage more contributions from non-engineering GitLab team members, as well as the wider community.\n\n**Bolstering the editor experience:** Our current Web IDE experience is useful for small changes, but has not been useful as an actual replacement for a local IDE. Over the next year, we will evaluate the impact of adding a container-based IDE solution, while continuing to streamline our editing experience, potentially by sunsetting the ACE editor. We will also improve the IDE experience with self-managed, client-side evaluation, server-side evaluation, and live-coding features for pair programming.\n\n**Creating a content management experience:** Projects in GitLab aren't always leveraged by pure engineering teams. Groups like marketing, sales and others often have needs for projects that more closely resemble marketing websites or documentation. While [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) enables the deployment of many popular static site generators, the editing experience is still geared toward technical users. Enabling a more WYSIWYG content management editor will help support non-technical personas use of GitLab for non-engineering driven projects.\n\n### What we're not doing\n\nChoosing to invest in these areas in 2020 means we will choose not to:\n\n* **Invest in features that help companies answer, “Am I doing the right activities?”.** Answering this question is something we will focus on in years two and three of the VSM plan.\n* **Treat ML models as first-class citizens in GitLab.** Instead, we will focus on getting large assets to become performant via improvements to Gitaly. Once this is completed, we will focus on ML models.\n* **Provide recommendations where customers can improve their efficiency in the DevOps lifecycle.** This will likely require comparisons amongst many GitLab users and an AI engine to make intelligent recommendations. 
These improvements will come in years two and three of the VSM plan.\n\n### Other areas of investment consideration\n\n* Data science: We should consider investment into a data science team that can assist with recommendations for Plan and VSM features.\n* [Dark themes](https://gitlab.com/gitlab-org/gitlab-ee/issues/14531): We should consider prioritizing a dark theme for both GitLab, as well as the Web IDE/editing experience. This is an expected feature of most modern development tools.\n* Engineering: Most Dev groups should see 50-100% headcount growth in order to make our Dev categories lovable.\n* AI: We should consider beginning to invest into AI as a solution for recommendations – for example, recommended assignees, labels, etc.\n\n## Survey\n\nNow that you've heard our strategy and plans, we'd love to hear your feedback. Please click below for a quick two-question survey.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nHelp shape our Dev strategy - [Take our survey](https://docs.google.com/forms/d/e/1FAIpQLSdfZTpqNYilD-bzcRKPPo5AIVZq-k5GrYd_thr21iXcreA-oQ/viewform)!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nCover Photo by [Joanna Kosinska](https://unsplash.com/@joannakosinska) on [Unsplash](https://unsplash.com/).\n{: .note}\n",[9,677],{"slug":1801,"featured":6,"template":680},"dev-strategy-review","content:en-us:blog:dev-strategy-review.yml","Dev Strategy 
Review","en-us/blog/dev-strategy-review.yml","en-us/blog/dev-strategy-review",{"_path":1807,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1808,"content":1814,"config":1820,"_id":1822,"_type":14,"title":1823,"_source":16,"_file":1824,"_stem":1825,"_extension":19},"/en-us/blog/developer-relations-at-gitlab-what-weve-learned-since-our-start",{"title":1809,"description":1810,"ogTitle":1809,"ogDescription":1810,"noIndex":6,"ogImage":1811,"ogUrl":1812,"ogSiteName":667,"ogType":668,"canonicalUrls":1812,"schema":1813},"Developer Relations at GitLab: What we've learned since our start","DevRel is key to success for many tech companies. Find out how GitLab's DevRel program has evolved to stay aligned with the industry and our customers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672008/Blog/Hero%20Images/AdobeStock_204527293.jpg","https://about.gitlab.com/blog/developer-relations-at-gitlab-what-weve-learned-since-our-start","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developer Relations at GitLab: What we've learned since our start\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Coghlan\"}],\n        \"datePublished\": \"2024-03-13\",\n      }",{"title":1809,"description":1810,"authors":1815,"heroImage":1811,"date":1817,"body":1818,"category":808,"tags":1819},[1816],"John Coghlan","2024-03-13","Earlier this year, a tweet (are they still called that?) by [Kelsey Hightower](https://twitter.com/kelseyhightower) sparked discussion on social media and internally at GitLab. \n\n![Kelsey Hightower tweet](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678041/Blog/Content%20Images/Screenshot_2024-03-08_at_8.19.09_AM.png)\n\nAt first, Kelsey's response might seem a bit flippant, but there’s an underlying truth to it: Developer Relations (short: DevRel) – and other business functions – must meet the needs of the business and your customers. 
However, what your stakeholders and customers need will be different in the future. Therefore, to be successful, you have to iterate to stay aligned with them. \n\nReflecting back on my five years working in Developer Relations (formerly known as Community Relations) at GitLab, our team has continuously evolved to stay aligned with the needs of our customers, our community, and the business. GitLab CEO and founder Sid Sijbrandij explains how North Star Metrics evolve in his blog post on goal-setting for startups: [Artificially constraining your company to one goal creates velocity and creativity](https://opencoreventures.com/blog/2023-06-05-artificially-constrain-one-goal-to-create-creativity-velocity/). He details the shift from attention to active users to revenue to profit. The evolution of DevRel at GitLab in many ways maps to that same journey.\n\n![What is DevRel - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678041/Blog/Content%20Images/image1.png)\n\n## Early DevRel at GitLab\n\nWhen I joined GitLab in 2018, our team was largely made up of Community Advocates, an Evangelist Program Manager (me), a Code Contributor program manager, and a director. The Community Advocates were tasked with monitoring and engaging with GitLab community members across various online channels but primarily [Hacker News](https://handbook.gitlab.com/handbook/marketing/developer-relations/developer-evangelism/hacker-news/) and Twitter. Answering questions and creating issues based on comments served to increase awareness and attention for GitLab. 
In addition, users learned that their questions would be answered and feedback was being heard and, frequently, acted on.\n\nAt the same time, the Code Contributor program and Evangelist program were driving growth and interest in GitLab by helping our contributors navigate the contribution process, organizing events and meetups to connect our community, and deepening our relationship with our community champions, also known as [GitLab Heroes](https://about.gitlab.com/community/heroes/). \n\nFor companies in early stages, this is how DevRel often looks. The key tactics in this phase are:\n- use low-cost tools (blogs and social media) to drive attention\n- capitalize on people’s interest to deepen relationships and create advocates and champions\n- smooth the pathways to contribute or discover content\n\n> **Tip:** Direct engagement with your community through social media and online forums drives awareness, builds trust, and increases the quality and volume of feedback on your product. \n\n## Expanding DevRel's reach \n\nNext, we ramped up programs like GitLab for Open Source and GitLab for Education. These programs helped attract to our platform key open source projects and many large academic institutions, both with large numbers of engaged users. More users meant more feedback to help us improve the product and more contributors. \n\nAs attention grew and the breadth and depth of our platform increased, we needed to better enable our customers to leverage the capabilities of GitLab’s DevSecOps Platform. This stage roughly maps to the revenue North Star Metric. To drive greater awareness and adoption, the Community Relations team underwent a critical change.\n\n> **Tip:** When looking to grow your active users, engage with partners who can bring their community to your product or platform. This strategy is often overlooked but can be a big boost to awareness and growth, setting you up for success. 
\n\n## Deepening the DevRel bench\n\nAs our next move, we formed a team of technical experts, known as Developer Evangelists. This team engaged in more traditional DevRel practices, those that might come to mind when asking yourself “What is DevRel?”. Internally, we referred to this team’s role as the three Cs: \n- Content creation - creating blog posts, technical talks, demos, and other content to enable our customers\n- Community engagement - engaging online and at events with our customers and community\n- Consulting - serving as internal advocates for and experts on the wider GitLab community\n\nHaving technical experts who could connect directly with customers and escalate that feedback internally helped improve the feedback loop between users and product teams. This team also deeply understood GitLab users, which improved the company's ability to enable our customers and community through content.\n\n> **Tip:** Early in your company journey, executives, product managers, and engineers play a vital role in engaging with community. As the number of users grows, you’ll need technical experts on your team who can directly engage with users and ensure customer feedback reaches key stakeholders (executives and product owners).\n\n## Continuously evolving DevRel at GitLab\n\nOver the past year, the team has evolved again.\n\n- A new vice president joined our team and has helped us become more strategic and better aligned cross-functionally.\n\n- A Contributor Success team was established to better engage and align with our customers around contributions to GitLab. 
Evolving from a one-person function to a full-fledged team of engineers with deep experience in open source (including multiple past contributors to GitLab), this team continuously improves the contribution experience and engages directly with customers who wish to contribute.\n\n- We updated our team name and many of our team members’ job titles to align with industry standards.\n\n- And we’ve all ramped up quite a bit on AI, perhaps you’ve heard of [GitLab Duo](https://about.gitlab.com/gitlab-duo/)? \n\nAs GitLab continues to mature as a public company, the team will continue to evolve. Through these changes, we will stay focused on increasing the efficiency and impact of our efforts for our customers, our product, and our team.\n\n## Gaining - and maintaining - executive buy-in\n\nExecutive buy-in is essential for DevRel. Look at the companies with the largest, most engaged communities and you will find that those companies also have the most active, engaged, and often highly respected founders and CEOs. This is certainly true with GitLab. \n\nGitLab’s engagement with our community began before we were even a company when Dmitriy Zaporozhets (DZ) started the open source GitLab project with [this commit](https://gitlab.com/gitlab-org/gitlab-foss/commit/9ba1224867665844b117fa037e1465bb706b3685). The engagement continued when Sid [launched GitLab on Hacker News](https://news.ycombinator.com/item?id=4428278).\n\nThe importance of community in GitLab’s success cannot be overstated, and while we’ve grown to heights that few companies reach, contributions from our customers and community remain central in [our strategy](https://handbook.gitlab.com/handbook/company/strategy/#dual-flywheels). Because of this, team members, from the highest levels of GitLab and throughout our organization, remain in active communication with our customers via issues and social forums, working hard at all times to help them succeed. Transparency is key here. 
Documenting our DevRel strategies in the [public GitLab handbook](https://handbook.gitlab.com/handbook/marketing/developer-relations/) enables everyone to contribute.\n\n> **Tip:** Executive support is critical when building a community.\n\n## So what is DevRel?\n\nI want to go back to the initial question that sparked this blog: What is DevRel? \n\nI’ll leave you with a quote from Emilio Salvador, vice president of Developer Relations at GitLab, which was recently merged to [our handbook page](https://handbook.gitlab.com/handbook/marketing/developer-relations): \n\n\u003Ci>\"Developer Relations (short: DevRel) operates at the intersection of technology, community, and advocacy, serving as the voice and ears of GitLab in the wider tech world. Their core mission revolves around nurturing and sustaining a vibrant, engaged community of developers, contributors, and users. This involves a multifaceted approach that includes creating educational content, organizing events and workshops, developing programs, and providing platforms for knowledge exchange and collaboration. The team not only focuses on promoting GitLab’s features and capabilities but also actively listens to and incorporates feedback from the community to inform product development and improvements.\"\u003C/i>\n\nThat’s what it is today, but if the history of DevRel at GitLab is any indication, I expect that we’ll continue to iterate going forward. 
\n\n> [Join our Discord community](https://discord.gg/gitlab) to continue the conversation.\n",[1440,1298,9],{"slug":1821,"featured":91,"template":680},"developer-relations-at-gitlab-what-weve-learned-since-our-start","content:en-us:blog:developer-relations-at-gitlab-what-weve-learned-since-our-start.yml","Developer Relations At Gitlab What Weve Learned Since Our Start","en-us/blog/developer-relations-at-gitlab-what-weve-learned-since-our-start.yml","en-us/blog/developer-relations-at-gitlab-what-weve-learned-since-our-start",{"_path":1827,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1828,"content":1834,"config":1841,"_id":1843,"_type":14,"title":1844,"_source":16,"_file":1845,"_stem":1846,"_extension":19},"/en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale",{"title":1829,"description":1830,"ogTitle":1829,"ogDescription":1830,"noIndex":6,"ogImage":1831,"ogUrl":1832,"ogSiteName":667,"ogType":668,"canonicalUrls":1832,"schema":1833},"Developing GitLab Duo: How we validate and test AI models at scale","Our blog series debuts with a behind-the-scenes look at how we evaluate LLMs, match them to use cases, and fine-tune them to produce better responses for users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659856/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25.png","https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: How we validate and test AI models at scale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Susie Bitters\"}],\n        \"datePublished\": \"2024-05-09\",\n      }",{"title":1829,"description":1830,"authors":1835,"heroImage":1831,"date":1837,"body":1838,"category":1839,"tags":1840},[1836],"Susie Bitters","2024-05-09","**_Generative AI marks a monumental shift in the software development 
industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers._**\n\nGitLab values the trust our customers place in us. Part of maintaining that trust is transparency in how we build, evaluate, and ensure the high-quality functionality of our [GitLab Duo](https://about.gitlab.com/gitlab-duo/) AI features. GitLab Duo features are powered by a diverse set of models, which allows us to support a broad set of use cases and gives our customers flexibility. GitLab is not tied to a single model provider by design. We currently use foundation models from [Google](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/ai_gateway/models/vertex_text.py?ref_type=heads#L86) and [Anthropic](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/ai_gateway/models/anthropic.py?ref_type=heads#L62). However, we continuously assess what models are the right matches for GitLab Duo’s use cases. In this article, we give you an inside look at our AI model validation process.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Understanding LLMs\n\nLarge language models (LLMs) are generative AI models that power many AI features across the platform. Trained on vast datasets, LLMs predict the next word in a sequence based on preceding context. Given an input prompt, they generate human-like text by sampling from the probability distribution of words conditioned on the prompt.\n\nLLMs enable intelligent code suggestions, conversational chatbots, code explanations, vulnerability analysis, and more. 
Their ability to produce diverse outputs for a given prompt makes standardized quality evaluation challenging. LLMs can be optimized for different characteristics, which is why there are so many AI models actively being developed.\n\n## Testing at scale\n\nUnlike traditional software systems where inputs and outputs can be more easily defined and tested, LLMs produce outputs that are often nuanced, diverse, and context-dependent. Testing these models requires comprehensive strategies that account for subjective and variable interpretations of quality, as well as the stochastic nature of their outputs. We, therefore, cannot judge the quality of an LLM’s output in an individual or anecdotal fashion; instead, we need to be able to examine the overall pattern of an LLM's behavior. To get a sense of those patterns, we need to test at scale. Testing at scale refers to the process of evaluating the performance, reliability, and robustness of a system or application across a large and diverse array of datasets and use cases. Our [Centralized Evaluation Framework (CEF)](https://about.gitlab.com/direction/ai-powered/ai_model_validation/ai_evaluation/) utilizes thousands of prompts tied to dozens of use cases to allow us to identify significant patterns and assess the overall behavior of our foundational LLMs and the GitLab Duo features in which they are integrated.\n\nTesting at scale helps us:\n\n- **Ensure quality:** Testing at scale enables us to assess the quality and reliability of these models across a wide range of scenarios and inputs. By validating the outputs of these models at scale, we can start to identify patterns and mitigate potential issues such as systematic biases, anomalies, and inaccuracies. \n- **Optimize performance:** Scaling up testing efforts allows GitLab to evaluate the performance and efficiency of LLMs under real-world conditions. 
This includes assessing factors such as output quality, latency, and cost to optimize the deployment and operation of these models in GitLab Duo features.\n- **Mitigate risk:** Testing LLMs at scale helps mitigate the risks associated with deploying LLMs in critical applications. By conducting thorough testing across diverse datasets and use cases, we can identify and address potential failure modes, security vulnerabilities, and ethical concerns before they impact our customers.\n\nTesting LLMs at scale is imperative for ensuring their reliability and robustness for deployment within the GitLab platform. By investing in comprehensive testing strategies that encompass diverse datasets, use cases, and scenarios, GitLab is working to unlock the full potential of AI-powered workflows while mitigating potential risks.\n\n### How we test at scale\n\nThese are the steps we take to test LLMs at scale.\n\n#### Step 1: Create a prompt library as a proxy for production\nWhile other companies view and use customer data to train their AI features, GitLab currently does not.  As a result, we needed to develop a comprehensive prompt library that is a proxy for both the scale and activity of production.\n\nThis prompt library is composed of questions and answers. The questions represent the kinds of queries or inputs that we would expect to see in production, while the answers represent a ground truth of what our ideal answer would be. This ground truth answer could also be mentally framed as a target answer. Both the question and the answer may be human generated, but are not necessarily so. These question/answer pairs give us a basis for comparison and a reference frame that allow us to tease out differences between models and features. 
When multiple models are asked the same question and generate different responses, we can use our ground truth answer to determine which model has provided an answer that is most closely aligned to our target and score them accordingly.\n\nAgain, a key element of a comprehensive prompt library is ensuring that it is representative of the inputs that we expect to see in production. We want to know how well foundational models fit to our specific use case, and how well our features are performing. There are numerous benchmark prompt datasets, but those datasets may not be reflective of the use cases that we see for features at GitLab. Our prompt library is designed to be specific to GitLab features and use cases.\n\n#### Step 2: Baseline model performance\n\nOnce we have crafted a prompt library that accurately reflects production activity, we feed those questions into [various models](https://about.gitlab.com/direction/ai-powered/ai_model_validation/ai_evaluation/foundation_models/) to test how well they serve our customer’s needs. We compare each response to our ground truth and provide it a ranking based on a series of metrics including: [Cosine Similarity Score](https://about.gitlab.com/direction/ai-powered/ai_model_validation/ai_evaluation/metrics/#similarity-scores), [Cross Similarity Score](https://about.gitlab.com/direction/ai-powered/ai_model_validation/ai_evaluation/metrics/#cross-similarity-score),  [LLM Judge](https://about.gitlab.com/direction/ai-powered/ai_model_validation/ai_evaluation/metrics/#llm-judge), and [Consensus Filtering with an LLM Judge](https://about.gitlab.com/direction/ai-powered/ai_model_validation/ai_evaluation/metrics/#consensus-filtering-with-llm-judge). This first iteration provides us a baseline for how well each model is performing, and guides our selection of a foundational model for our features. 
For brevity, we won’t go into the details here, but we encourage you to [learn more about the metrics here](https://about.gitlab.com/direction/ai-powered/ai_model_validation/ai_evaluation/metrics/). It is important to note this isn’t a solved problem; the wider AI industry is actively researching and developing new techniques. GitLab’s model validation team keeps a pulse on the industry and is continuously iterating on how we measure and score the LLMs GitLab Duo uses.  \n\n#### Step 3: Feature development\n\nNow that we have a baseline for our selected model's performance, we can start developing our features with confidence. While prompt engineering gets a lot of buzz, focusing entirely on changing the behavior of a model via prompting (or any other technique) without validation means that you are operating in the dark and very possibly overfitting your prompting. You may solve one problem, but be causing a dozen more. You would never know. Creating a baseline for a model's performance allows us to track how we are changing behavior over time for all our necessary use cases. At GitLab, we re-validate the performance of our features on a daily basis during active development to help ensure that all changes improve the overall functionality.\n\n#### Step 4: Iterate, iterate, iterate\n\nHere is how our experimental iterations work. Each cycle, we examine the scores from our tests at scale to identify patterns:\n\n- What are the commonalities across our weakest areas?\n- Is our feature performing poorly based on a specific metric or on a certain use case?\n- Do we see consistent errors popping up in response to a certain kind of question?\n\nOnly when we test at scale do these kinds of patterns begin to emerge and allow us to focus our experiments. 
Based on these patterns, we propose a variety of experiments or approaches to try to improve performance in a specific area and on a specific metric.\n\nHowever, testing at scale is both expensive and time-consuming. To enable faster and less expensive iteration, we craft a smaller scale dataset to act as a mini-proxy. The focused subset will be weighted to include question/answer pairs that we know we want to improve upon, and the broader subset will also include sampling of all the other use cases and scores to ensure that our changes aren't adversely affecting the feature broadly. Make your change and run it against the focused subset of data. How does the new response compare to the baseline? How does it compare to the ground truth?\n\nOnce we have found a prompt that addresses the specific use case we are working on with the focused subset, we validate that prompt against a broader subset of data to help ensure that it won’t adversely affect other areas of the feature. Only when we believe that the new prompt improves our performance in our target area through validation metrics AND doesn’t degrade performance elsewhere, do we push that change to production.\n\nThe entire Centralized Evaluation Framework is then run against the new prompt and we validate that it has increased the performance of the entire feature against the baseline from the day before. In this way, GitLab is constantly iterating to help ensure that you are getting the latest and greatest performance of AI-powered features across the GitLab ecosystem. This allows us to ensure that we keep working faster, together.\n\n### Making GitLab Duo even better\n\nHopefully this gives you insight into how we’re responsibly developing GitLab Duo features. This process has been developed as we’ve brought [GitLab Duo Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/) and [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) to general availability. 
We’ve also integrated this validation process into our development process as we iterate on GitLab Duo features. It’s a lot of trial and error, and many times fixing one thing breaks three others. But we have data-driven insights into those impacts, which helps us ensure that GitLab Duo is always getting better.\n\n> Start a [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) today!\n\n ## Resources\n - [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/)\n - [GitLab's AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/)\n - [GitLab AI-powered Direction page](https://about.gitlab.com/direction/ai-powered/)\n\n\u003Cfigure class=video_container>\n\u003Ciframe width=560 height=315 src=\"https://www.youtube-nocookie.com/embed/LifJdU3Qagw?si=A4kl6d32wPYC4168\" title=\"YouTube video player\" frameborder=0 allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen=\"\">\u003C/iframe>\n\u003C/figure>\n\n## Read more of the \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/) \n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD 
pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)","ai-ml",[1299,1298,475,677,9],{"slug":1842,"featured":91,"template":680},"developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale","content:en-us:blog:developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale.yml","Developing Gitlab Duo How We Validate And Test Ai Models At Scale","en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale.yml","en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale",{"_path":1848,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1849,"content":1855,"config":1860,"_id":1862,"_type":14,"title":1863,"_source":16,"_file":1864,"_stem":1865,"_extension":19},"/en-us/blog/devops-strategy",{"title":1850,"description":1851,"ogTitle":1850,"ogDescription":1851,"noIndex":6,"ogImage":1852,"ogUrl":1853,"ogSiteName":667,"ogType":668,"canonicalUrls":1853,"schema":1854},"Beyond CI/CD: GitLab's DevOps vision","How we're building GitLab into the complete DevOps toolchain.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670214/Blog/Hero%20Images/devops-nova-scotia-cover.jpg","https://about.gitlab.com/blog/devops-strategy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Beyond CI/CD: GitLab's DevOps vision\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Pundsack\"}],\n        \"datePublished\": \"2017-10-04\",\n      }",{"title":1850,"description":1851,"authors":1856,"heroImage":1852,"date":1857,"body":1858,"category":299,"tags":1859},[1796],"2017-10-04","\n\nWith GitLab 10.0, we shipped [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) for the Community and Enterprise\nEditions. 
Read on for an in-depth look at our strategy behind it, and beyond.\n\n\u003C!-- more -->\n\nI recently met with my colleagues\n[Joe](/company/team/#JAScheuermann) and\n[Courtland](/company/team/#mktinghipster) to give them the\nlowdown on GitLab's DevOps vision: where we've come from and where we're headed.\nYou can watch the video of our discussion or check out the lightly edited\ntranscript below. You can also jump into the rabbit hole, starting with the meta\nissue for [GitLab DevOps](https://gitlab.com/gitlab-org/gitlab-ce/issues/32639).\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zMAB42g4MPI\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\n\n## CI/CD: Where we've come from\n\n![CI/CD/Beyond CD](https://about.gitlab.com/images/blogimages/devops-strategy-ci-scope.svg)\n\nWhen I joined GitLab about a year ago, I created a [vision document for\nCI/CD](/direction/#ci--cd), and outlined a lot of the\nkey things that I thought were missing in [CI/CD in general](/topics/ci-cd/), and going beyond CD.\nI literally called one section \"beyond CD\" because I didn’t have a name for it\nthen.\n\nAnd in that document, I created an example pipeline to characterize all this\nstuff, to show how the pieces fit together into a development lifecycle.\n\n![Example pipeline](https://about.gitlab.com/images/blogimages/devops-strategy-example-pipeline.png){: .shadow}\n\nI love this diagram not only because it's complex and scary, but because when we\nstarted, we had maybe four boxes filled in, and now we have 10 or 12 filled in. To\nstart with, we had code management and, obviously, builds and tests. And we kind\nof did deployment, but not really.\n\nSince then, we’ve added review apps – a specific example of deployments – which\nis really awesome. We also added a more formalized mechanism for doing\ndeployments; actually recording deployments and deployment histories, keeping\ntrack of environments, and everything else. 
Then we added Canary Deployments in\n9.2 and code quality in 9.3. We added system monitoring with Prometheus in 9.0.\n\nWe don’t yet have what I called \"business monitoring,\" which could mean\nmonitoring revenue, or clicks, or whatever you care about; but that’s coming. We\ndon't yet have load testing, but the Prometheus team is thinking about that.\nWe don't yet have a plan for feature flags, but I think it's a really important\npart.\n\nAnd then we have this other dimension of pipelines, which is the relationship\nbetween different codebases (or projects), and in 9.3 we introduced the first\nversion of multi-project pipelines.\n\nSo we've gone from a core view of three or four boxes to where 90 percent is\ncomplete. That's pretty awesome.\n\nIt became obvious to me that we were viewing the scope with this hard line:\ndeveloper focused rather than an ops focused. For example, we’ll deploy into production,\nand we might even watch the metrics related to your code in production, but\nwe’re not going to monitor your entire production app, because that’s\noperations, and that’s clearly out of scope, right?\n\n## Where we're headed: Beyond CD\n\nWhat hit me a few months ago is, \"Why is that out of scope? That’s ridiculous.\nNo, we’re going to keep going. We're going to go past production into\noperations.\" Most of this still applies, but instead of just monitoring the\nsystem as it relates to a merge request, what about monitoring the system for\nnetwork errors, outages, or dependency problems? What if we don't stop at\nproduction, and monitor things that are typically ops related that may not\ninvolve a developer at all?\n\nThen I realized that this thing I called Beyond CD, maybe it's really [DevOps](/topics/devops/).\nMaybe the whole thing is DevOps.\n\n### The DevOps tool chain\n\nTo offer some context: DevOps is hard to define, because everybody defines it\nslightly differently. 
Sometimes DevOps is defined as the intersection of\ndevelopment, operations, and quality assurance.\n\n![DevOps Venn diagram](https://about.gitlab.com/images/blogimages/devops-strategy-venn-diagram.png){: .shadow}\n\n*\u003Csmall>Image by Rajiv.Pant, derived from Devops.png:, [CC BY 3.0](https://commons.wikimedia.org/w/index.php?curid=20202905)\u003C/small>*\n\nFor the most part, my personal interest in DevOps has been in that intersection.\nWe do great code management; we’ve done that for quite a while. How do we get\nthat code into production? How do we get it into QA?\n\nReview apps are a great example that fits squarely in that tiny, little triangle\nin the middle of the Venn diagram. You take your code, you deploy it, which is\nan operations thing, but you have it deployed in a temporary, ephemeral, app,\njust for QA people (or designers, product managers, or anyone who is not a\nprimary coder), so they can test your application for quality assurance, feature\nassurance, or whatever.\n\nBut now, I'm looking beyond the intersection. Here's the [DevOps tool chain\ndefinition](https://en.wikipedia.org/wiki/DevOps_toolchain) from Wikipedia:\n\n![DevOps Toolchain](https://about.gitlab.com/images/blogimages/devops-strategy-devops-toolchain.png){: .shadow}\n\n*\u003Csmall>Image by Kharnagy (Own work) [CC BY-SA 4.0](http://creativecommons.org/licenses/by-sa/4.0), via Wikimedia Commons\u003C/small>*\n\nWell, that’s everything! That’s not the intersection; that’s the union of\neverything from code, to releasing, to monitoring. And that's where things get\nconfusing. Sometimes when people talk about DevOps, they’re not talking about\nall of your code stuff. It’s the intersection parts that are the interesting\nparts of DevOps. It’s the parts where we let developers get their code into\nproduction easily. 
That slice, that intersection, of the Venn diagram, that’s\nthe interesting part about DevOps.\n\nHaving said that, as a product company, we are going to deliver things that are\npretty squarely on the development side, and, eventually, we’re going to deliver\nthings that are pretty squarely in the operations side. At some point, we may\nhave an operations dashboard that lets you understand your dependencies in your\nnetwork infrastructure, and your routers, and your whatever. That’s pretty far\nfetched at this point, but it could happen. Why not? Just have GitLab be\nyour one operations dashboard, and then it’s not just about the intersection of\nthe DevOps, it’s the whole DevOps tool chain.\n\nSo, that is the whirlwind, high-level summary of where we've been, and a little\nbit about where we’re going. Now let's get into specific issues.\n\n### The Ops Dashboard – [#1788](https://gitlab.com/gitlab-org/gitlab-ee/issues/1788)\n\nWe have a monitoring dashboard that's very developer centric. What about\ntaking that same content and slicing it from the operator's perspective? For a\nmoment, ignore all the stuff below, let’s just pretend there’s only the four\nboxes at the top:\n\n![Ops view of monitoring and deploy board](https://about.gitlab.com/images/blogimages/devops-strategy-monitoring-deploy-board.png){: .shadow}\n\nSo an operator might want to know, \"What’s the state of production?\" If I'm a\ndeveloper I can go into a project, into environments, see the production\nenvironment for that project, and I can see what the status is. But what if I\nwant to see all production environments? 
As an operations person, I care a\nlittle less about individual projects than I care about \"production.\" So this is\ngiving me the overview of \"production.\" All of these little boxes would\nrepresent production deploys of projects that you have in your GitLab\ninfrastructure.\n\nThe view is explicitly convoluted because we had just introduced sub-groups and\nI wanted to make sure this mechanism expanded. So ignore all the stuff below and\njust look at the top-level dashboards. Or maybe one level down, which is already\nstill pretty complicated, but let’s say your marketing organization had\ndifferent properties than your other developer operations; you’d be able to see\nreally quickly what the status is. If something’s red, you’d be able to click\ndown, and see details.\n\n![Ops view - service health](https://about.gitlab.com/images/blogimages/devops-strategy-service-health.png){: .shadow}\n\n![Ops view - pod health](https://about.gitlab.com/images/blogimages/devops-strategy-pod-health.png){: .shadow}\n\nYou’d be able to see graphs like this, which are similar to what we already\nprovide, but from the other angle. As a developer I’m looking at the deploy, and\nsaying, \"Oh, how did my deploy affect my performance?\" But this is saying,\n\"How’s production? Is anything wrong with my entire production suite?\"\n\nThis is really just scratching the surface of the ops views of things, but I\nthink it's going to become much more important as people embrace DevOps. You\nwant your developers to be talking the same language as your operations people.\nIn a lot of organizations, it’s already the same people – there are no separate\noperations people. Developers push code to production, and they're paged if\nsomething goes wrong. In others, developers and operators are separate, but they\nwant to work together towards DevOps.\n\nEither way, you want to be using the same tools. 
You want to be able to point\nto, for example, a memory bump that your operations people should also be able\nto see. But if they’re using completely different tools, like New Relic and\nDatadog, that kind of sucks. So let’s give them the same tools.\n\n### Pipeline view of environments – [#28698](https://gitlab.com/gitlab-org/gitlab-ce/issues/28698)\n\nI particularly love this proposal, and I really want to see this happen soon.\n\nThe environments page today is just a list of environments showing the last\ndeployment. The picture tells you who deployed, which is good, and you can see\nthat the commit is from the same SHA as staging, which is kind of nice. I can\nsee the deploy board, and if there's a deploy ongoing, I’m able to see the state\nas it rolls out. We don’t yet show you the current health of these pods; once\nthey're deployed, all we know is that they're deployed. This is how the\nenvironment view is today, and it's centered around deployments.\n\n![Environments list](https://about.gitlab.com/images/blogimages/devops-strategy-environments-list.png){: .shadow}\n*\u003Csmall>Current Environment view\u003C/small>*\n\nYou can click through to see the deployment history and this is actually really\nvaluable because I can see who deployed things, how long ago, and if something\nwent wrong in production I can really quickly roll back and let the developers\nhave some space to go and figure out what went wrong.\n\n![Deployment history](https://about.gitlab.com/images/blogimages/devops-strategy-deployment-history.png){: .shadow}\n*\u003Csmall>Current Deployment History view\u003C/small>*\n\nBut this proposal turns it around to have more of a DevOps view of the thing.\n\n![Pipeline view of environments](https://about.gitlab.com/images/blogimages/devops-strategy-pipeline-view-environments.png){: .shadow}\n*\u003Csmall>Proposed pipeline view of Environments\u003C/small>*\n\nThe idea is to take the same application, and instead of just looking at a list\nof 
environments, I’d be looking at columns with lots of review apps, and some\nnumber of staging environments, and a production environment. Instead of just\nshowing you the SHA, we would show you, for example, what merge requests have\nbeen merged into staging that are not yet in production. That’s a great\nmarriage of these two views, that you’d be able to see the diff between them.\n\nThis list, although it’s just a mockup, shows maybe the last five things that\nwere in production, or what was included in the last deploy, or whatever works\nbest for your environment. Showing what’s in the last deploy might be enough,\nbut for people who deploy 17 times a day, maybe that’s a little less useful, and\nwe just show history.\n\nBut then what about building in more of the operations kind of stuff, and\nsaying, \"Alright, what’s the state of my pods?\" Here we were flagging where the\nerror rate exceeded a threshold and there’s some alert that popped up. And here\nwe’re showing this automatic rollback kind of stuff, but basically just really\nbuilding on this ops view. Of course this is still a DevOps view, in the sense\nthat I’m looking at an individual project. So, one permutation of that would\nmarry that ops view of all of production. Or if I’m looking at a [microservices](/topics/microservices/)\nkind of thing, where there are five or 100 different projects, and I want to see\nthe status of all those really quickly. 
See\n[#28707](https://gitlab.com/gitlab-org/gitlab-ce/issues/28707).\n\n### Dependency security – [#28566](https://gitlab.com/gitlab-org/gitlab-ce/issues/28566)\n\nSo, here, the idea is that you've deployed something in production, and some\nmodule or something that you depend on has been updated, not by you, but by the\ncommunity, or someone else.\n\nThe easiest and most naive way to approach this is that with the next merge\nrequest, or next CI/CD run, we would go and check to see if anything’s outdated.\nAnd we might fail your CI/CD because of this.\n\nIt would make much more sense to run this stuff automatically. Even if, for\nexample, nobody pushes for seven days, and in the middle of that, there’s a\nsecurity release; just proactively run stuff and notify me. So, that's sort of a\nsecond iteration of thinking about how you would notify somebody, and tell them,\n\"Oh, you’ve got a security change. You should go in and do something about it.\"\n\nNow, the third iteration is, \"Well, what would you do with that information?\"\nYou’d go and maybe give it to your junior developer to go and make the change,\nand point to the new version. And then, of course, you need to test that it\nworks. So, you’re going to create a merge request, and then test it, to make\nsure that it still functions properly.\n\nWell, why notify somebody, and tell the junior developer to go and do this? Why\ndon’t we just do it for you? Why don’t we just go and submit the merge request\nfor you, and then tell you what the results are. And, in fact, let’s go further,\nand say, \"Hey it passed. We just deployed into production for you.\" Why would\nyou have security vulnerability in place any longer than necessary?\n\nAnd instead of having 100 alerts about 100 projects or microservices that all\nneed to get updated, you just get alerts about three of them that fail, that\nactually have some weird dependency that it didn’t work on. 
And then, you can\nfocus on real problems.\n\n![Dependency security](https://about.gitlab.com/images/blogimages/devops-strategy-dependency-security.png){: .shadow}\n\nSo, that’s a glimpse at how we’re thinking about this.\n\nThis would definitely be an enterprise-level feature. And again, we've fleshed\nout some ideas and it’s unscheduled, but it does really tie into the ops\nmindset.\n\n### Question: Enterprise Edition features\n\nCourtland: You mentioned that sort of automation would be an enterprise edition\nfeature. Can you talk a little bit more about why a smaller development team,\nlike under 100 developers, wouldn’t get value out of something like that?\n\nMark:\tSo, this is where things get a little tricky, because of course,\nsmaller developer teams would get value out of that too. Everybody would get\nvalue out of that. Some of it has to do with proportionality. One test I like to\nuse is: is there some other way you could achieve the same thing, using\nworkarounds, and we’re just making it easier? And that’s a good case, here. You\ncan already do this, but we’re going to automate it. And automation is something\nthat affects larger companies a lot more, because they’ve got hundreds of\nprojects, with thousands of developers. And they just can’t deal with the scale,\nor it’s worth dealing with the automation. Whereas, if you’ve got a small\ndeveloper, with a single project, you’re pretty much on top of it. And if\nsomething changes, yeah, you just go ahead and fix it; you’re aware of it. The\nbigger challenges are when you’re just not aware of how this thing might affect\none project that somebody’s almost forgotten about.\n\nThe other thing is that, just to be blunt, our concept that Enterprise Edition\nis only for more than X people, is a little flawed. It’s that it\napplies more to those companies, that those people value it more, and they’d be\nwilling to pay for it more, or however you judge your value there. 
Clearly,\nsmall companies would value all this automation, and everything else, but\nthey’re not going to get as much incremental value out of it, as a larger\ncompany would.\n\n~~The other way to look at it is that this is pretty advanced stuff, and frankly,\nit doesn’t deserve to be, free, open source. It’s probably really complicated\nstuff, and you’re going to have to pay there.~~ *[Editor's note: Advancedness is not a criteria in open sourcing or not open sourcing. There are advanced features that are open source, such as [Review Apps](/stages-devops-lifecycle/review-apps/). There are basic features that are proprietary, such as [File Locking](/solutions/file-locking/). The criteria we use to decide which version the features go in are documented on our [stewardship page](/company/stewardship/#what-features-are-paid-only).]* Maybe there’d be levels to it,\nright? There’d be a version that gives you an alert: we’ll run this test once a\nday. Or even just have a blog post about how to do this: you set up a recurring,\nscheduled pipeline job, once a day, to test if any of your dependencies have\nbeen updated. And you can do that today and then it would alert you. But to\nautomate it, to actually, create a merge request for you, and everything else?\nWell, that’s in the Enterprise feature. It’s not that version checking isn’t\nimportant for everybody, but the automation around it really, really matters for\nlarger companies. 
Does that make sense?\n\nCourtland:\tYeah, I mean, I think that the first way you described it, in that,\n\"Yeah, everyone gets some value out a feature like this, but the overwhelming\nvalue and use for this is in larger development teams,\" that resonated.\n\n### SLO and auto revert – [#1661](https://gitlab.com/gitlab-org/gitlab-ee/issues/1661)\n\nThis is a feature showing how we’re thinking about auto reverting something.\nWe’ve got canary deployments, and we have another feature we’re not currently\nworking on or scheduled, but it’s incremental rollout, so that you would not\njust rollout to a single canary, or a bucket of canaries, but it would slowly\nincrement: 1 percent, then 5 percent, then 25 percent. But let’s say, at some point, during my\nrollout, you detect an error.\n\n![Revert](https://about.gitlab.com/images/blogimages/devops-strategy-revert.png){: .shadow}\n\nThis a mockup of what it would look like. You’re like, \"Oh, error rates\nincreased by something above our threshold; let’s revert that one, go back, and\ncreate a new issue, and alert somebody to take a look at it.\" Lately, I’m\nthinking that I don’t know if I really want to automatically roll back, versus\njust stop it in its canary form, and say, \"Well, it’s canary. Let’s let canary\nbe there, so you can debug the canary, but just don’t let the canary go on\nfurther.\"\n\nError rate exceeding is a pretty tough one. But let’s say memory bumps up, and\nyou might be like, \"Yeah, we added something, and it’s using more memory, and\nwe’re okay with that. 
Don’t stop my deploy just because it’s using more memory.\"\nThere might need to be human intervention in there, but somewhere along this\nline we’re automating a lot of the deploy stuff.\n\n### Onboarding and adoption – [#32638](https://gitlab.com/gitlab-org/gitlab-ce/issues/32638)\n\nOnboarding and adoption is a really big issue, with lots of different ideas for\nhow to improve onboarding, how to get people actually using idea to production,\nimproving auto deploy. Not a lot of visuals, so I won’t really talk about it,\nbut it’s definitely one of our top priorities; the next most important thing\nwe’re working on.\n\n### Cloud development – [#32637](https://gitlab.com/gitlab-org/gitlab-ce/issues/32637)\n\nCloud development is the idea that setting up your local host machine is\nactually kind of a pain sometimes. Especially with microservices, where each\nservice can be in their own language, you don’t want to maintain Java, and Ruby,\nand Node, and all these other versions of dependencies, and every time something\nswitches, you’ve got to reinstall a new version of stuff. Or even these days,\nyou might develop on an iPad, and you don’t have a local host to compile things.\n\nCloud9 is the biggest, well known thing, from an IDE perspective, and Amazon\nbought them a little while ago. But even aside from the IDE portion of it, it’s\njust being able to develop in the cloud, and being able to make some changes,\nand then push them back; commit them to a repo.\n\nWe have a little bit of a demo like this, right now, with our web terminal. So,\nif you have Kubernetes, you see this terminal button, and it just pops up the\nterminal right in the staging server. And I can actually go ahead and edit a\nfile there, and... I just made a live change into my staging app.\n\nNow, generally speaking, I would not actually recommend you do that, because\nI’m messing with my staging app, that’s not what it's for. It makes an awesome\nlittle demo, but it’s not what you should do. 
What we want to do is come up with\na way that people could do that, but have it be not on your staging app, but in\nmaybe a dev environment that is specifically for this purpose. But that also,\nafter you make your changes, and test them, and run them live, you can then go\nand commit them back to [version control](/topics/version-control/), and close that loop. So there’s a whole\nbunch of issues related to that. And to be honest, it was what we were hoping\nthat Koding would have provided for us, and we have an integration\nwith them, but it hasn’t worked out, really, the way that we had hoped. And so,\nwe’re looking at alternatives, and we think we can probably do this ourselves.\n\nAnyway, that’s a big thing to flesh out.\n\n### GitLab PaaS – [#32820](https://gitlab.com/gitlab-org/gitlab-ce/issues/32820)\n\nHeroku is awesome, because it gives you this really great platform that’s easy\nto use, and gives you all this functionality on top of Amazon. Five or six years\nago it was super, brain-meltingly awesome to get people to do ops. For a\ndeveloper, I don’t have to be aware of how to do ops; Heroku just does ops\nfor us.\n\nGitLab PaaS is basically the idea that you’ve got a lot of these components, and\nwe’re not going to invent them all from scratch. We’re going to rely on\nKubernetes, for example. But on top of Kubernetes, we could make an awesome\nenvironment for ops. An ops environment, or a platform as a service. And so,\nthere’s an issue to discuss what it would take to do that. At some point in\ntime, this is a big item for us. If we can make it super really easy for you to\nfully manage your ops environment via GitLab, and maybe, for example, never\ntouch the Kubernetes dashboard; never touch any of the tools, just use the\nGitLab tools to do this. That’s pretty powerful.\n\nSort of related is an idea in the onboarding stuff, that on GitLab.com\nwe can actually provide you with a Kubernetes cluster; maybe a shared cluster. 
We\nhave to worry about security, of course. But imagine if you were a brand new\nuser on GitLab.com, and you push up an app, and you have nothing in there\nspecifically for GitLab, you just push up your code, and GitLab is like, \"Oh,\nthat’s a Ruby app. Okay, I know how to build Ruby apps. Oh, and I also know how\nto test Ruby apps. I’m just going to go and test them automatically for you.\"\nAnd, \"Oh, by the way, I know how to deploy this. I’m just going to go ahead and\ndeploy this to production.\" And we’ll make a\nproduction.project-name.ephemeral-gitlabapps.com, whatever the hell, some domain\nso that it’s not going to affect your actual production. But if you wanted to,\nyou would just point your DNS over to this production app, and you've got the\nproduction app running on GitLab infrastructure. And that’s, really, what Heroku\nprovided, right?\n\nBut that also is an onboarding thing for us to make it really easy. Because if\nwe want everybody to have CI, well, let’s turn it on for you. That’s pretty\nawesome. If we want everybody to have CD, we can’t just turn it on for you,\nbecause you have to have a place to deploy it to. So, if we just provided you a\nKubernetes cluster (\"everybody gets a cluster\"), then you just got a place. And,\nI mean, we’ll severely limit it. We’ll make it limited in some way, so that\nyou’re not going to run the production stuff for long there. Or if you do, you have\nto pay for it. But we’re not going to try and make money off of the production\nresources. We want to make money off of making it really easy. So, really, what\nwe want to do is encourage you to, then, go and spin up your own Kubernetes\ncluster, say, on Google. 
And we’ll make a nice little link that says, \"Go and\nspin up a cluster on GKE.\" We’ll make that really, really easy, but to make it\nsuper easy, for some number of days, we can just provide you that cluster,\nautomatically.\n\n### Feature flags – [#779](https://gitlab.com/gitlab-org/gitlab-ee/issues/779)\n\nFeature flags are really about decoupling delivery from deployment. It’s the\nidea that you make your code, you deploy it, but you haven’t turned it on, so\nit’s not delivered yet. And the idea there is that it means you can merge in the\nmain line, more often, because it’s not affecting anybody. And, also, it really\nhelps because you can do things like: when I do deliver, I can deliver it for\ncertain people; just GitLab employees or just the Beta group, and then I can\ncontrol that rollout. So then, if there's an error rate spike, well, it’s just\na few a people and I know who they are, and they’re going to complain to me.\nIt’s no big deal. But I can test things out, get it polished, fix the problems,\nbefore rolling it out. And then, you can also do things like, roll it out to 10 percent\nof the people, 50 percent of the people, whatever. It’s all about reducing risk, and\nimproving quality, and fundamentally about getting things into your mainline\nquicker. So, it’s ops-ish, in that sense, but it’s, really, still pretty fully\non dev.\n\n### Artifact management – [#2752](https://gitlab.com/gitlab-org/gitlab-ee/issues/2752)\n\nArtifact management has become a hot topic lately. We already have a container\nregistry for Docker image artifacts, and we also have file-based artifacts that\nyou can pass between jobs, and pass between pipelines, and even pass between\ncross project pipelines. 
And we have ways to download them, and browse them, but\nif those artifacts happen to be things like Maven or Ruby or node modules, and\nyou want to publish them, and then consume them in other pipelines, we don’t\nhave a formal way to do that.\n\nAnd you could, obviously, publish to the open source, RubyGems, for example. But\nif you want a private Gem, that is only consumed by your team... Maybe that's\nnot as big for Ruby developers, but Java developers do that all the time. A lot\nof Java developers use Artifactory or Sonatype Nexus. In order to complete the\nDevOps tool chain, we need to have some first class support for that, either by\nbundling in one of these other providers, or by adding layers, and APIs, on top\nof our existing artifacts. My personal pet favorite right now is, let’s say we\ncan just tag our existing artifact, and say, \"Oh, this is Maven type of\nartifact,\" and then we expose that via an API and so then you can declare that\nin another project, and it would just consume the APIs, and just know how to do\nthat. But it would also use our built-in authentication so you don’t have to set\nup creds and do all this declaration; you can be like, \"Oh, I’ve got access to\nthis project and this project, so I can get the artifacts, and I can consume it\nall really easily.\"\n\n### Auto DevOps – [#35712](https://gitlab.com/gitlab-org/gitlab-ce/issues/35712)\n\n*Note: We shipped the first iteration of Auto DevOps in [10.0](/releases/2017/09/22/gitlab-10-0-released/#auto-devops)*\n\nSo, let’s talk about Auto DevOps. This spans from the near-term to the very\nlong-term. It’s great that we do a lot of DevOps, and in a very simplistic way,\nit’s like, \"Oh, but shouldn’t we just make this stuff automatic?\" The way I\nphrase it is, we should provide the best practices in an easy and default way.\nYou can set up a GitLab CI YAML, but you have to actively go and do that. But,\nreally, every project should be running some kind of CI. 
So, why don’t we just\ndetect when you’ve pushed up a project; we’ll just build it, and we’ll go and\ntest it, because we know how to do testing. Today, with Auto Deploy, we already\nuse Auto Build, with build packs. We will automatically detect, I think, one of\nseven different languages, and automatically build your Java app, or Ruby, or\nNode... and we use Heroku’s build packs, actually, to do this build. And so we\nbuild that up, and when using Auto Deploy, we’ll go ahead and deploy that. You\nstill have to, obviously, have a Kubernetes cluster in order to do that, so it’s\nnot fully automated if you don’t have that. But if you’ve got Kubernetes, hey,\nthis is a literally one click. You pick from a menu, say, \"Oh, I’m on\nKubernetes,\" and then hit submit, and you’ve got Auto Deploy and Auto Build.\n\nBut one of the things we don’t have is Auto CI. And that’s a little annoying,\nbut it’s one of the things we want to pick up, and actually, hopefully our CTO,\nDmitriy, is going to pick that up in Q3; it's one of his OKRs. Heroku,\nthemselves, actually extended build packs to do testing, and so that means that\nthere’s at least five build packs that know how to test these languages. And so,\nhey, let’s use that. But even if that doesn’t work, there’s a lot of other\nthings we can do. Other companies have all this stuff automated, as well. So if\nwe can’t use Heroku CI, being able to say, \"Oh, this is this language; we know\nhow to test this language,\" we'll be making that automatic.\n\nAutomatic is multiple levels of things. Is it a wizard that configures this\nstuff for me? Is it one click checkbox, that says, \"Yes, turn on auto CI,\" or is\nit templates that I can easily add into my GitLab CI YAML? I think, in order to\nqualify as auto, what we have to do here is that it shouldn’t be templates. It\nshouldn’t be blog posts that tell me how to do it. That’s just CI. 
It should be,\nliterally, just \"I pushed and it worked;\" or at most a checkbox or two.\n\nLet’s go further, what other thing could we just automate here? And not automate\nstrictly for the purposes of automation, but about bringing best practices to\npeople. So, you have to actively work hard, to turn these things off. If you\ndon’t want CI, then shut it off, but by default you should have this.\n\nSo, this is a really, really long list of things that will take us forever to\nget to. The first ones have links, because we’re tracking real issues for this.\nAuto Metrics is a great one. If you’re running certain languages, you should\njust be able to, really easily, go and just pull the right information out of\nthere. But whatever, the list is huge.\n\nBut the idea is that we can build up this Auto DevOps, even the marketing term,\nand start talking about it in that way, and to not just say that GitLab is great\nfor your DevOps and is a complete DevOps tool chain. But, in fact, we do all\nthis stuff for you automatically.\n\nThere’s a lot to be done to make this fully automated. And what percentage of\nprojects can we really do? Auto Deploy is a great example that only works for\nweb apps. If it’s not a web app, we can’t just deploy it. What would it mean? We\ndeploy it, and it just wouldn’t function. If you made a command line app, what\nwould deploy even mean? Or if it’s a Maven, or really any kind of module that\nyou bundled up and released, that’s not the same thing as a deploy. So, maybe we\nneed an Auto Release. It’s not on this list, but maybe it should be. But within\nthe web app space, we can do some of this stuff automatically.\n\nSo that’s it. 
Everything you ever wanted to know about DevOps.\n",[9,1440,1090],{"slug":1861,"featured":6,"template":680},"devops-strategy","content:en-us:blog:devops-strategy.yml","Devops Strategy","en-us/blog/devops-strategy.yml","en-us/blog/devops-strategy",{"_path":1867,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1868,"content":1874,"config":1881,"_id":1883,"_type":14,"title":1884,"_source":16,"_file":1885,"_stem":1886,"_extension":19},"/en-us/blog/devops-tool-landscape",{"title":1869,"description":1870,"ogTitle":1869,"ogDescription":1870,"noIndex":6,"ogImage":1871,"ogUrl":1872,"ogSiteName":667,"ogType":668,"canonicalUrls":1872,"schema":1873},"The DevOps tool landscape","Competitive intelligence manager Mahesh Kumar describes the criteria we use when comparing GitLab to other DevOps tools.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670008/Blog/Hero%20Images/devops-tool-landscape.jpg","https://about.gitlab.com/blog/devops-tool-landscape","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The DevOps tool landscape\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mahesh Kumar\"},{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-11-01\",\n      }",{"title":1869,"description":1870,"authors":1875,"heroImage":1871,"date":1878,"body":1879,"category":787,"tags":1880},[1876,1877],"Mahesh Kumar","Chrissie Buchanan","2019-11-01","\nOne of the [core values](https://handbook.gitlab.com/handbook/values/) at GitLab is transparency, and it is in this spirit that we evaluate and articulate how GitLab fits into the competitive landscape. One of the ways we’ve demonstrated this transparency is by [listing other DevOps tools](/competition/) on our website and how they compare to functionality in GitLab. 
This approach is a little unorthodox but we believe this transparency not only helps teams make the right decisions, it also helps us identify where we can improve our product.\n\nFor any competitive comparison to be effective, it has to be fair, accurate, and easy to understand. Whether we’re comparing [three versions of Jenkins](/blog/jenkins-one-year-later/) to GitLab CI/CD, or comparing other [DevOps tools](/topics/devops/devops-tools-explained/) in the SDLC, we try to ensure these three key objectives of competitive comparisons are achieved.\n\n## Staying fair\n\nOne of the biggest challenges in competitive comparisons is staying fair and credible. The selection of competitive comparison criteria plays a significant role because it has to be comprehensive and not self-serving. Far too often vendors restrict competitive comparison criteria to what their product does well and avoid the gaps that might be in their products. At GitLab, we make a concerted effort to avoid this pitfall, and our culture of transparency keeps us honest in our assessment of where we excel and where we can do better.\n\nThe [GitLab Maturity Framework](/direction/maturity/) articulates the stages, categories, and features that constitute the end-to-end DevOps lifecycle. The maturity framework shows where GitLab provides an elevated user experience and also outlines our planned roadmap for the future. Since this framework takes a long-term view of criteria/features that constitute various DevOps stages and categories, we use this framework as a guide for our competitive comparisons.\n\nIn our GitLab Maturity Framework, we have a few categories where we rank as one of the best-in-class, both with industry analysts and GitLab users: Source code management, code review, and continuous integration (CI). To see one of these comparisons, check out our Jenkins CI page where we outline features, pricing, and a comprehensive overview.\n\n[Jenkins vs. 
GitLab](/devops-tools/jenkins-vs-gitlab/)\n{: .alert .alert-gitlab-purple .text-center}\n\n## Keeping it accurate\n\nHaving settled on criteria for evaluation, getting the data accurate is a major challenge. We have a structured information gathering process as laid out below:\n\n    1. Website\n    2. Documentation\n    3. Demos\n    4. Product install and usage\n    5. Customer feedback\n\nSometimes we are unable to complete this process for all vendor products for several reasons. First is the lack of available information either on a vendor's website or documentation. Second, we may be unable to access their product to validate certain capabilities. Some vendors do not provide a free or easily accessible version of the product, while others may explicitly prohibit the use of their product for comparison purposes. In either case, we restrict our comparison to publicly available details.\n\nThe second challenge in ensuring accuracy is that vendors don't always put out new releases and capabilities on a constant basis and our analysis may be slightly outdated. One of the best examples of this is, “when does one stop [painting the Golden Gate Bridge](http://goldengatebridge.org/research/facts.php#PaintHowOften)?” The answer is never! It’s an ongoing process that requires continuous paint touch-ups from one end to the other.\n\n## Everyone can contribute\n\nOur open source DNA extends to how we manage the tools landscape pages. We freely solicit input internally from multiple teams within GitLab and more importantly from other vendors’ teams. Anyone, including other vendors, can use GitLab to create an issue stating the change they wish to see or information they would like to correct. This issue is then assigned to the appropriate GitLab team to address. 
In fact, one Product Manager from a vendor recently contacted us about a change to their comparison page, and we gladly made that change.\n\nBy providing an opportunity to comment and give feedback, we hope to foster a dialog with those better informed about different products, thereby improving the tools landscape pages with rich and accurate information.\n\n## Easy to understand\n\nThe final challenge in comparison pages is to make them easy to interpret. We do this in two different ways: First, all the feature-level comparison is listed in the comparison page. For those interested in a particular feature or capability, they can easily scan the page to find the feature they’re looking for.\n\nSometimes the feature details need explanation, or perhaps there’s a feature that doesn’t quite fit into the “yes or no” mold. For that reason, we also provide a top-down analysis at the start of most comparison pages that provides a summary of features and provides additional context. This sometimes means a critical feature can get lost in the text, but we are doing our best to keep consistency across vendors and identify discrepancies quickly.\n\nThere are a lot of DevOps tools out there. As a complete [DevOps platform](/solutions/devops-platform/) delivered as a single application, GitLab can remove the pain of having to choose, integrate, learn, and maintain the multitude of tools necessary for a successful DevOps toolchain. 
If a DevOps tool is missing, feel free to [email us](mailto:incoming+gitlab-com-marketing-product-marketing-7424125-issue-@incoming.gitlab.com?subject=DevOps%20tool%20request&amp;amp;bcc=devopstools%40gitlab.com&amp;amp;body=-%20Tool%20name%3A%0D%0A-%20Stages%3A%0D%0A-%20Change%3A%0D%0A%0D%0A%0D%0APlease%20leave%20these%20label%20flags.%20%20%20%20%0D%0A%2Flabel%20~comparison%20~Servicedesk) or [create an issue](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#create-a-new-issue) and we’ll be happy to add a feature comparison for that product.\n\nCover image by [Troy Nikolic](https://unsplash.com/@troynikolic?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[109,1440,9],{"slug":1882,"featured":6,"template":680},"devops-tool-landscape","content:en-us:blog:devops-tool-landscape.yml","Devops Tool Landscape","en-us/blog/devops-tool-landscape.yml","en-us/blog/devops-tool-landscape",{"_path":1888,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1889,"content":1895,"config":1901,"_id":1903,"_type":14,"title":1904,"_source":16,"_file":1905,"_stem":1906,"_extension":19},"/en-us/blog/e-factor-productivity",{"title":1890,"description":1891,"ogTitle":1890,"ogDescription":1891,"noIndex":6,"ogImage":1892,"ogUrl":1893,"ogSiteName":667,"ogType":668,"canonicalUrls":1893,"schema":1894},"Improve your productivity by tracking your time and measuring your E-factor","Sharing my personal experience of how tracking my time while working remotely helped me be more productive.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673115/Blog/Hero%20Images/e-factor.jpg","https://about.gitlab.com/blog/e-factor-productivity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improve your productivity by tracking your time and measuring your 
E-factor\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2019-11-26\",\n      }",{"title":1890,"description":1891,"authors":1896,"heroImage":1892,"date":1898,"body":1899,"category":743,"tags":1900},[1897],"Matej Latin","2019-11-26","\nBack in the day, when I worked on-site and in open plan offices, I always felt unproductive despite being always busy. It was a paradox that I couldn’t understand. How come I’m rushing to do a lot of things all the time but still feel like I’m producing nothing that is truly valuable? Why do I get more work done in my “work from home day” that I only get every two weeks, than I do in the office?\n\nAfter joining GitLab and reading a couple of books on workplaces and productivity, I now understand why this was the case. Cal Newport’s [Deep Work](https://www.goodreads.com/book/show/25744928-deep-work) was the most illuminating book that I read on productivity. He breaks the types of work into two categories:\n\n**Shallow work**: *Non-cognitively demanding, logistical-style tasks, often performed while distracted. These efforts tend to not create much new value in the world and are easy to replicate.*\n\n**Deep work**: *The ability to focus, be uninterrupted for long stretches of time and fall into a [state of flow](https://en.wikipedia.org/wiki/Flow_(psychology)).*\n\nIn his **Deep Work Hypothesis**, he claims that the ability to focus separates the top performers from the rest:\n\n> The ability to perform deep work is becoming increasingly rare at exactly the same time it’s increasingly valuable in our economy. As a consequence, the few who cultivate this skill and then make it the core of their working life will thrive.\n\nWhile I was doing a lot of different things at the same time, it was mostly reactive work instead of valuable, [proactive](/handbook/product/ux/how-we-work/#proactive-and-reactive-ux) work. 
Replying to emails, attending meetings, chatting on Slack, and similar work demands a lot of energy but returns very little, if any, value. Taking this all into account, I decided to go back to working remotely because I knew [I could control my working environment better and be more productive](/blog/eliminating-distractions-and-getting-things-done/). That’s why I ended up joining GitLab.\n\n## The E-factor\n\n*Peopleware* by Tom DeMarco and Timothy Lister is another book that is popular with GitLab team members. In the book, the authors introduce a concept called *the E-factor*. To put it simply, the E-factor is about measuring brain time *versus* body time – so how much time a person is working at their full potential *versus* how much time they’re present at the office. The formula to calculate it is the following:\n\n> E-factor = uninterrupted hours / body-present hours\n\nSo when I worked in open plan offices, I was present for about eight hours, but had a maximum of about one or two hours of uninterrupted time. That means that my E-factor ranged from *0.125 to 0.25*. It’s impossible to produce valuable work with such a low E-factor. Switching to working remotely at an all-remote company immediately improved this but I recently decided to take it even further. I measured how I spent my time for two weeks while working at GitLab. The first week was to document how I had already been spending my time and then the second week with the introduction of improvements that would increase my uninterrupted time. Research suggests that intense concentration is only possible for up to four hours per day so I was aiming to get to four hours of uninterrupted time altogether, but ideally in a single block. Here’s how I spent time before the improvements:\n\n![My week before improvements](https://about.gitlab.com/images/blogimages/before-improvements.jpg){: .large.center}\n\nI tracked my time by dividing days into 15-minutes blocks. 
Light grey is sleep, light blue is family time, and dark blue is work time. Red colors are for shallow work, meetings and email time. The more of the dark blue blocks and the more connected the better.\n{: .note.text-center}\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nGet the [Google Spreadsheet template for tracking time](https://docs.google.com/spreadsheets/d/10CnZlCW0fu-GXqGhK7Lysj5QzTGIqYjdv6yrUlbARzo/edit?usp=sharing). Go to *File* > *Make a copy* to get an editable version.\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nBefore introducing the improvements, this is how my usual day looked like:\n\n* I checked my email first thing in the morning, which could cause me to spend up to one hour just replying to other people.\n* I used to study a book or take course lessons in the morning as a part of professional self-improvement. This was usually half an hour. By the time I actually started working it’d be 9:30.\n* I’d work for a couple of hours and stop for a quick snack at 11:30. This was the first stretch of uninterrupted time.\n* After the snack I’d have another similar stretch of time but that was usually just an hour (mostly because I’d get distracted with shallow work).\n\nSo if I put all this together, I had about three hours of uninterrupted time every day. It’s not that bad (and it’s definitely better than what I experienced in on-site roles in the past) but I wanted to do better. I especially wanted to increase the amount of uninterrupted time in a single stretch. 
So I decided to make the following improvements:\n\n* I started checking my email in the afternoon, after lunch (that’s 3pm for me).\n* I moved the self-improvement activities until after the first snack at 11:30am.\n* I realized I spent an hour and a half showering and eating breakfast in the morning, which was way too much. I reduced this to one hour so I could start working 30 minutes earlier (8am instead of 8:30am).\n\n![My week after improvements](https://about.gitlab.com/images/blogimages/after-improvements.jpg){: .large.center}\nA lot more dark blue, and a lot more of connected dark blue blocks after improvements.\n{: .note.text-center}\n\nWith these improvements, I was able to increase the first stretch of uninterrupted time from two hours to three and a half hours. With an additional one to two hours of uninterrupted time after the snack that can sum up to four and a half to five and a half hours of uninterrupted time each day. My E-factor increased to *0.6875*, that’s a **275% increase** compared to my times in the office! These changes to my workflow help me perform deep work and fall into a state of flow twice a day, and I noticed drastic improvements in my productivity and in my psychological state as well.\n\n## Things that enabled me to introduce these improvements\n\n### Separate room for work\n\nI have a study at home where I can be alone and focus. I think this is a very important thing for all remote workers.\n\n### Strong working routine\n\nAt GitLab, working remotely and asynchronously gives us the [freedom to shape our working schedule as we please](https://handbook.gitlab.com/handbook/values/#managers-of-one) but a strong working routine has lots of benefits. Starting work at the same time in the morning helps with creating more uninterrupted time and productivity.\n\n### Timezone\nI’m based in Europe and most of my colleagues are based in the U.S. 
This means that I can easily block out time for focused work and eliminate all distractions, including Slack.\n\n### My Slack and email policy\n\nEven when I’m not in my focus time, [I have Slack notifications disabled](https://handbook.gitlab.com/handbook/values/#bias-towards-asynchronous-communication). I even disabled the small red dots on the app icon in the dock so that nothing has the possibility of distracting me. As for email, I’ll only check my inbox after lunch, that’s well after I had my two blocks of uninterrupted time.\n\n### Writing down tasks\n\nI always write down the things that I need to work on. I have a small notebook on my desk and at the end of each day, I write down the things I need to work on the next day. This way, I can go straight to work in the morning.\n\n### Keeping a journal of tasks\n\nRecently, I also started keeping track of all the things I need to work on in my “tasks journal”. It’s just a project on GitLab where I keep a couple of Markdown files for current tasks that I’m working on and an archive of tasks that I worked on in the past. They’re all divided by weeks. For example, at the time of writing this paragraph, it’s week 33 of this year so my [current tasks](https://gitlab.com/matejlatin/focus/blob/master/Tasks/current.md) are things that I want to work on in this week. At the end of the week, I’ll check the progress and [archive it](https://gitlab.com/matejlatin/focus/tree/master/Tasks) so I can always check back later.\n\nKeeping a task journal adds a stronger sense of continuity and sharp focus to my work. In the spirit of [transparency](https://handbook.gitlab.com/handbook/values/#public-by-default), I share this publicly with all my co-workers so everyone can see what I’m working on and check my availability.\n\n### Working asynchronously\n\nOne of the greatest benefits of working at GitLab is [being encouraged to work asynchronously](/handbook/communication/). 
Because our team isn't tied to the same working hours, I can block out time for focus without feeling guilty that I’m not available to everyone all the time. It’s interesting how working like this makes you realize that most interruptions aren’t as urgent as we tend to believe.\n\n## Advice for non-remote workers\n\nIf you’re required to work in an office – possibly a working environment full of distractions – implementing these strategies can be a lot more challenging. My advice for non-remote workers is to ask your manager for “work from home” days. Maybe start with one day per week and see how it goes. If your manager doesn't agree, try tracking your time when you work in the office like I did. Present the chart to them and tell them about the deep work and the E-factor. Explain to your manager that you want to increase your uninterrupted time which will help you complete more valuable work. Tell them how working from home will help you achieve this, and how you will change your workflow to be more productive (look for inspiration from my improvements I described in this article). Be committed to producing more meaningful work and be clear that working from home is only a means to an end. Offer to track your time at home to compare with your time spent your in the office, especially if your manager doesn’t seem to be in favor of these changes.\n\nIf working from home is still not an option, consider finding a quiet spot in the office where you’ll be uninterrupted: Perhaps the lounge, the garden, or even the reception area. Try moving to an area away from your teammates and sit with people you don’t know as well. They’re much less likely to disturb you. When I was working from a busy office in central London, I loved going to a coffee shop for an hour or two. I managed to get some work done and enjoyed the short trip to the shop and back. 
The walk and getting out of the office helped me relax a bit as well.\n\nThese changes to how we work are all about improving productivity and quality of work. In an ideal working environment, everyone would measure their E-factors and they’d brag about their uninterrupted time instead of complaining about how many meetings they have to attend in an effort to perform busyness to their colleagues.\n\nPhoto by [Émile Perron](https://unsplash.com/@emilep?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/productivity?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,700,1698],{"slug":1902,"featured":6,"template":680},"e-factor-productivity","content:en-us:blog:e-factor-productivity.yml","E Factor Productivity","en-us/blog/e-factor-productivity.yml","en-us/blog/e-factor-productivity",{"_path":1908,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1909,"content":1915,"config":1921,"_id":1923,"_type":14,"title":1924,"_source":16,"_file":1925,"_stem":1926,"_extension":19},"/en-us/blog/elasticsearch-update",{"title":1910,"description":1911,"ogTitle":1910,"ogDescription":1911,"noIndex":6,"ogImage":1912,"ogUrl":1913,"ogSiteName":667,"ogType":668,"canonicalUrls":1913,"schema":1914},"Update: The challenge of enabling Elasticsearch on GitLab.com","How we got started with enabling Elasticsearch on the largest GitLab instance, GitLab.com.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666832/Blog/Hero%20Images/enable-global-search-elasticsearch.jpg","https://about.gitlab.com/blog/elasticsearch-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update: The challenge of enabling Elasticsearch on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nick Thomas\"}],\n        \"datePublished\": \"2019-07-16\",\n      
}",{"title":1910,"description":1911,"authors":1916,"heroImage":1912,"date":1918,"body":1919,"category":743,"tags":1920},[1917],"Nick Thomas","2019-07-16","\nBack in March, [Mario](/company/team/#mdelaossa) shared some of the [lessons we'd learned from our last attempt to enable\nElasticsearch](/blog/enabling-global-search-elasticsearch-gitlab-com/) on GitLab.com, an integration that would unlock both [Advanced Global Search](https://docs.gitlab.com/ee/user/search/advanced_search.html)\nand [Advanced Syntax Search](https://docs.gitlab.com/ee/user/search/advanced_search.html). Since then, we've been working hard to address problems with the integration and prepare for [another attempt](https://gitlab.com/groups/gitlab-org/-/epics/853).\n\n## Selective indexing\n\nAt the heart of our dilemma was a classic \"chicken and egg\" problem. We needed\nto gather more information about [Elasticsearch](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html) to make improvements to the total\nindex size, but without an active deployment, that information was very hard to\ngather. Customer feedback and small-scale testing in development environments\nall help, but [dogfooding](https://handbook.gitlab.com/handbook/values/#dogfooding)\nthe integration is the best way to get the information we require.\n\nTo resolve this, we prioritized changes to enable Elasticsearch integration on\nGitLab.com. Since the index size was a hard problem, this meant some kind of\nselective indexing was necessary, so we've added\n[per-project and per-group controls](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html).\n\nOn Jun. 24, 2019, we enabled the integration for the `gitlab-org` group on\nGitLab.com. 
Now, any searches at the group or project level will make use of the\nElasticsearch index, and the advanced features the integration unlocks will be available.\nWe figured, why not [give it a try](https://gitlab.com/search?search=gitlab-org+%28gitaly+%7C+ee%29&group_id=9970)?\n\nThe total index size for this group – which includes about 500 projects – is around 2.2\nmillion documents and 15GB of data, which is really easy to manage from the point of view of\nElasticsearch administration. The indexing operation itself didn't [go as smoothly as we hoped](https://gitlab.com/gitlab-com/gl-infra/production/issues/800), however!\n\n## Bug fixes\n\nAnother advantage to having selective Elasticsearch indexing enabled on GitLab.com\nis that our engineers need confidence that the feature is performant,\nthat it won't threaten the overall stability of GitLab.com, and that it is\nsubstantially bug-free. So we went through a [Production Readiness Review](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/64)\nbefore enabling it. The review uncovered a number of pre-existing bugs and new regressions, which have all been fixed in the\n[12.0 release](/releases/2019/06/22/gitlab-12-0-released/). 
Some of the bugs included:\n\n* [Elasticsearch was sometimes used for searches, even when disabled](https://gitlab.com/gitlab-org/gitlab-ee/issues/11795)\n* [Performance regression indexing database content](https://gitlab.com/gitlab-org/gitlab-ee/issues/11595)\n* [Regression searching for some projects at group level](https://gitlab.com/gitlab-org/gitlab-ee/issues/12091)\n* [Regression visiting page 2 of search results](https://gitlab.com/gitlab-org/gitlab-ee/issues/12254)\n* [Wiki indexing still relied on a shared filesystem](https://gitlab.com/gitlab-org/gitlab-ee/issues/11269)\n* [Searching snippets with Elasticsearch enabled still queries the database, not Elasticsearch](https://gitlab.com/gitlab-org/gitlab-ee/issues/10548)\n\nWe still can't claim to be bug-free, of course, but the picture is a lot rosier than if we'd attempted to roll out this feature without first using it ourselves.\n\nWe'd tested the new indexing code on our staging environment, but this was last\nrefreshed more than a year ago, and was significantly smaller than the group on\nGitLab.com, containing around 150 projects. As a result, some bugs and\nscalability issues were uncovered for the first time in production. We're\naddressing them with high priority in the 12.1 and 12.2 releases. 
The scaling issues include:\n\n* [Project imports unconditionally enqueue an ElasticCommitIndexerWorker](https://gitlab.com/gitlab-org/gitlab-ee/issues/12362)\n* [Allow maximum bulk request size to be configured](https://gitlab.com/gitlab-org/gitlab-ee/issues/12375)\n* [Intelligently retry bulk-insert failures when indexing](https://gitlab.com/gitlab-org/gitlab-ee/issues/12372)\n* [Note bulk indexing often fails due to statement timeout](https://gitlab.com/gitlab-org/gitlab-ee/issues/12402)\n* [Cannot index large snippets](https://gitlab.com/gitlab-org/gitlab-ee/issues/12111)\n* [Removing documents from the index can fail with a conflict error](https://gitlab.com/gitlab-org/gitlab-ee/issues/12114)\n\nOnce these issues are addressed, indexing at scale should be quick, easy, and\nreliable. Indexing at scale is invaluable from the point of view of an engineer trying out\nchanges to reduce total index size.\n\n## Administration\n\nAnother area for improvement is administering the indexing process\nitself. Although GitLab automatically creates, updates, and removes documents\nfrom the index when changes are made, backfilling existing data required manual\nintervention, running a set of complicated (and slow) rake tasks to get the\npre-existing data into the Elasticsearch index. Unless these instructions were\nfollowed correctly, search results would be incomplete. There was also no way\nto configure a number of important parameters for the indexes created by GitLab.\n\nWhen using the selective indexing feature, GitLab now automatically enqueues\n\"backfill\" tasks for groups and projects as they are added, and removes the\nrelevant records from the index when they are supposed to be removed. 
We've also made it possible to\n[configure the number of shards and replicas](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html)\nfor the Elasticsearch index directly in the admin panel, so when GitLab creates\nthe index for you, there's no need to manually change the parameters afterwards.\n\nPersonal snippets are the one type of document that won't be respected in the\nselective-indexing case. To ensure they show up in search results, you'll still\nneed to run the [`gitlab:elastic:index_snippets`](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html) rake task [for now](https://gitlab.com/gitlab-org/gitlab-ee/issues/12333).\n\nThere are also improvements if you're not using selective indexing – the admin\narea now has a \"Start indexing\" button. Right now, this only makes sense if\nstarting from an empty index, and doesn't index personal snippets either, but\nwe're hopeful we can [remove the rake tasks entirely](https://gitlab.com/gitlab-org/gitlab-ee/issues/11206)\nin the future.\n\n## What next?\n\nWe're really happy to have Elasticsearch enabled for the `gitlab-org` group, but\nthe eventual goal is to have it [enabled on all of GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/153).\nWe'll be rolling it out to more groups in the future.\n\nTo get there, we'll need to continue to improve the\n[administration experience](https://gitlab.com/groups/gitlab-org/-/epics/428) using Elasticsearch.\nFor instance, it's still difficult to see the indexing status of a group or\nproject at a glance, a function that would be really useful for our support team to answer\nqueries like \"Why isn't this search term returning the expected results?\"\n\n### Managing the Elasticsearch schema is also a challenge\n\nCurrently, we take the easy route of reindexing everything if we need to change some aspect of the\nschema, which doesn't scale well as the index gets larger. 
[Some\nwork on this is ongoing](https://gitlab.com/gitlab-org/gitlab-ee/issues/328),\nand the eventual goal is for GitLab to automatically manage changes to the\nElasticsearch index in the same way it does for the database.\n\n[Reducing the index size](https://gitlab.com/groups/gitlab-org/-/epics/429) is\nstill a huge priority, and we hope to make progress on this now that we\nhave an Elasticsearch deployment to iterate against.\n\n### We'd also like to improve the quality of search results\n\nFor example, we have\nreports of code search [failing to find certain identifiers](https://gitlab.com/gitlab-org/gitlab-ee/issues/10693) and we'd like to use the Elasticsearch index in more contexts, such as for\n[filtered search](https://gitlab.com/gitlab-org/gitlab-ee/issues/12082).\n\nThe Elasticsearch integration is progressing. Finally, responsibility for the Elasticsearch integration has been passed from\nthe [Plan stage](/handbook/product/categories/#plan-stage)\nto the [Editor group of the Create stage](/handbook/product/categories/#editor-group).\nI hope you'll join Mario and me in wishing [Kai](/company/team/#phikai),\n[Darva](/company/team/#DarvaSatcher), and the rest of the team the best of luck in tackling the remaining challenges for Elasticsearch. 
An up-to-date overview of their plans can always be found on\nthe [search strategy](/direction/global-search/) page.\n\nPhoto by [Benjamin Elliott](https://unsplash.com/photos/vc9u77c0LO4) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[677,231,9],{"slug":1922,"featured":6,"template":680},"elasticsearch-update","content:en-us:blog:elasticsearch-update.yml","Elasticsearch Update","en-us/blog/elasticsearch-update.yml","en-us/blog/elasticsearch-update",{"_path":1928,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1929,"content":1935,"config":1940,"_id":1942,"_type":14,"title":1943,"_source":16,"_file":1944,"_stem":1945,"_extension":19},"/en-us/blog/email-opt-in-policy-announcement",{"title":1930,"description":1931,"ogTitle":1930,"ogDescription":1931,"noIndex":6,"ogImage":1932,"ogUrl":1933,"ogSiteName":667,"ogType":668,"canonicalUrls":1933,"schema":1934},"New email policy: Let us know if you want to hear from us!","We're changing our email policy, and you'll only hear from us if you explicitly opt in.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683564/Blog/Hero%20Images/email-policy-change.jpg","https://about.gitlab.com/blog/email-opt-in-policy-announcement","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New email policy: Let us know if you want to hear from us!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2017-08-31\",\n      }",{"title":1930,"description":1931,"authors":1936,"heroImage":1932,"date":1937,"body":1938,"category":299,"tags":1939},[950],"2017-08-31","\n{::options parse_block_html=\"true\" /}\n\n\u003Cdiv class=\"panel panel-info\">\n\n**Note from September 30, 2018: Email policy has been updated**\n{: .panel-heading}\n\n\u003Cdiv class=\"panel-body\">\n\nAt GitLab, we strive to communicate with people in a way that is beneficial to them. 
Most of our email marketing communications follow an explicit opt-in policy, although at times, we may communicate via email to people who have not explicitly opted in. We do this to offer something of value (e.g. an invitation to a workshop, dinner, the opportunity to meet an industry leader, etc. – not an email inviting you to read a blog post). We always include the unsubscribe link in our communications and we respect the unsubscribe list.\n\n\u003C/div>\n\u003C/div>\n\n{::options parse_block_html=\"false\" /}\n\nWith [GitLab 9.5](/releases/2017/08/22/gitlab-9-5-released/) we introduced a change to our email policy. If you want to keep hearing from us (we hope you do!) you'll need to opt in by visiting the [subscription center](https://page.gitlab.com/SubscriptionCenter.html).\n\n\u003C!-- more -->\n\nIn the past, signing up for GitLab.com opted you in automatically to a subscription to our newsletter. Many of our users read and enjoy it, but we want to give you a choice, so we're changing our policy to send communication with your explicit opt-in only. With this change, when you sign up or visit your subscription center, you'll be able to see all your options and have full control over what types of messages you receive from us*.\n\n![New email subscription boxes](https://about.gitlab.com/images/blogimages/email-policy-opt-in.png){: .shadow}\n\nNow you can specify that you want to hear about upcoming events or webcasts, or be kept in the know with security alerts. 
Tick as many or as few boxes as suits you.\n\n*You may still receive system emails associated with your account or GitLab instance\n{: .note}\n\n\"[Muriwai, New Zealand](https://unsplash.com/@mathyaskurmann?photo=fb7yNPbT0l8)\" by [Mathyas Kurmann](https://unsplash.com/@mathyaskurmann) on Unsplash\n{: .note}\n",[675,9],{"slug":1941,"featured":6,"template":680},"email-opt-in-policy-announcement","content:en-us:blog:email-opt-in-policy-announcement.yml","Email Opt In Policy Announcement","en-us/blog/email-opt-in-policy-announcement.yml","en-us/blog/email-opt-in-policy-announcement",{"_path":1947,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1948,"content":1953,"config":1959,"_id":1961,"_type":14,"title":1962,"_source":16,"_file":1963,"_stem":1964,"_extension":19},"/en-us/blog/enabling-global-search-elasticsearch-gitlab-com",{"title":1949,"description":1950,"ogTitle":1949,"ogDescription":1950,"noIndex":6,"ogImage":1912,"ogUrl":1951,"ogSiteName":667,"ogType":668,"canonicalUrls":1951,"schema":1952},"Lessons from our journey to enable global code search with Elasticsearch on GitLab.com","Read about some of the dead ends we've encountered on the way to enabling global code search on GitLab.com, and how we're working on a way forward.","https://about.gitlab.com/blog/enabling-global-search-elasticsearch-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lessons from our journey to enable global code search with Elasticsearch on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mario de la Ossa\"}],\n        \"datePublished\": \"2019-03-20\",\n      }",{"title":1949,"description":1950,"authors":1954,"heroImage":1912,"date":1956,"body":1957,"category":743,"tags":1958},[1955],"Mario de la Ossa","2019-03-20","\nWe're [working hard to switch our search infrastructure on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/153) to\ntake advantage of our 
[Elasticsearch integration](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html), which should allow us to improve global search and enable global code search for our users.\n\nEnabling this integration on GitLab.com is important to us because it will unlock better search performance and allow us\nto improve the relevance of results for our GitLab.com users – something our self-managed users have been able to take advantage of for a few years now.\nWe've been working on this for a while, and have hit many dead ends and pitfalls which maybe you can learn from too.\n\n## Our plan\n\nWe have two very important things that need to happen: we must reduce the Elasticsearch index size,\nand we must improve the administration of the Elasticsearch integration.\n\n## 1. Reduce index size\n\nCurrently, the Elasticsearch index utilizes approximately 66 percent of the space the repos use.\nThis is our biggest blocker, as this is the bare minimum amount of space required – this number goes up when you consider the need for replicas.\n\nWe've attempted multiple things to get the index size down, but all of them resulted in minimal (or no) changes at all,\nso due to the complexity of implementing the changes we've decided to ignore them (at least for now).\n\n### Things we've tried\n\n#### Force merges\n\nWhen you delete a document from Elasticsearch, it doesn't actually free up space right away.\nInstead it does a soft delete, and Elasticsearch will release the space used in the future via an operation called a [merge](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-merge.html).\n\nIn [gitlab-org/gitlab-ee#7611](https://gitlab.com/gitlab-org/gitlab-ee/issues/7611) we investigated the possibility of forcing Elasticsearch\nto reclaim this space periodically via an operation called a [forcemerge](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html).\nThis seemed like a very worthwhile thing to 
investigate as an Elasticsearch index could theoretically grow up to 50 percent more due to these soft deletions.\nIn the end though, we found out that a `forcemerge` is a blocking call, and causes extreme performance degradation while it runs –\nnot something you want in a production environment!\nSadly we were forced to abandon this, but we did learn a bit more about [how to tune Elasticsearch so merges are less painful, which we documented here](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html).\n\n#### NGram sizes\n\nIn order to allow users to search without using exact phrases (it would be annoying if a search for \"house\" didn't bring up \"houses\" for\nexample) we use what is called an [Edge NGram](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-edgengram-tokenizer.html)\nfilter for blobs (code files) and SHA1 strings (commit IDs).\n\nWe have our Edge NGram filters set to create a maximum length of 40.\nRight off the bat we knew we could not lower the maximum size for our SHA1 filter, since we want our users to be able to find commits no matter how many characters of the ID they give us, and the maximum is 40.\n\nWe could, however, play with the Edge NGram filter we use to analyze code, so we tested a few different scenarios in [gitlab-org/gitlab-ee#5585](https://gitlab.com/gitlab-org/gitlab-ee/issues/5585).\nWe came up with conflicting results, but the savings were between 7-15 percent.\nNot bad! 
We still haven't changed the maximum length though, as we still need to confirm that searching is not impacted unduly with such a change.\n\n#### Separate indexes\n\nCurrently, our Elasticsearch integration lumps all document types into the same index.\nThis is because, in order to only return results to which a user has access, we must check the Project the object belongs to for the user's access level, which would be very expensive to do if we had to do it result per result after Elasticsearch returns the results of the query.\n\nThat said, there was a chance that having separate indexes could improve our space usage, and it would definitely improve the re-indexing\nexperience, so in [gitlab-org/gitlab-ee#3217](https://gitlab.com/gitlab-org/gitlab-ee/issues/3217) we took a stab at it.\nWe learned that having separate indexes does nothing for space usage, which we already suspected since Elasticsearch 6.0 shipped with great support for [sparse fields](https://www.elastic.co/blog/minimize-index-storage-size-elasticsearch-6-0).\n\nWe're still looking into having separate indexes, as in testing we have discovered it [greatly improves indexing speed](https://gitlab.com/gitlab-org/gitlab-ee/issues/3217#note_130304358)\nand should also improve the experience of having to re-index certain models.\n\n## 2. 
Improve administration capabilities for Elasticsearch\n\nRight now, all administration related to Elasticsearch must be done on the Elasticsearch cluster directly.\nWe also currently require the Elasticsearch integration to be an all-or-nothing deal: you must enable it for all projects, or none of them.\nTo make matters worse, when we make a change to the index schema, we require a full re-index of the entire repo right away in order for the update to work.\nWe need to fix all these things and make Elasticsearch easier to administer from within GitLab if we want to have a fighting chance at\nenabling Elasticsearch support on GitLab.com.\n\nSome concrete things we're working on:\n\n### Better cluster visibility\n\nIn order to help the administration of Elasticsearch, we must enable better controls for it from within GitLab.\nIssues [gitlab-org/gitlab-ee#3072](https://gitlab.com/gitlab-org/gitlab-ee/issues/3072) and\n[gitlab-org/gitlab-ee#2973](https://gitlab.com/gitlab-org/gitlab-ee/issues/2973) aim to provide a simple, but functional, admin interface\nfor Elasticsearch within GitLab.\n\n### Graceful recovery\n\nCurrently, if some data fails to index, whether due to a Sidekiq outage or any other reason, the only solution is to\nre-index the full Elasticsearch cluster, which is painful! In [gitlab-org/gitlab-ee#5299](https://gitlab.com/gitlab-org/gitlab-ee/issues/5299)\nwe will be looking into ways to improve this.\n\n### Selective/progressive indexing\n\nIn [gitlab-org/gitlab-ee#3492](https://gitlab.com/gitlab-org/gitlab-ee/issues/3492) we will be taking a look at enabling\nElasticsearch on a project-by-project basis.\n\n### Allow disabling of code indexing\n\nIn [gitlab-org/gitlab-ee#7870](https://gitlab.com/gitlab-org/gitlab-ee/issues/7870) we're investigating making\ncode indexing optional. What this would mean is that global code search would not be available, but searching within a\nproject would work as it currently does, backed by direct Gitaly searches. 
This is attractive to us as it would bring\nsearch improvements to Projects, Groups, Issues, and Merge Requests. This will also be a very useful feature for self-managed\ninstances that want to have better search support for Issues/MRs/etc. but don't really need global code search. Indexing\nthe repos to enable global code search takes an incredible amount of time, so offering the choice of disabling it gives our\nself-managed users more choice.\n\n### Shard Elasticsearch per group\n\nIn [gitlab-org/gitlab-ee#10519](https://gitlab.com/gitlab-org/gitlab-ee/issues/10519) we're considering having separate Elasticsearch\nservers per group, similar to how Gitaly works, but on a group level instead of project level. Elasticsearch servers can become very large,\nreducing performance and making them less maintainable. By having a separate server per group we would also gain resiliency in case one\ncluster goes down, as only the group related to that cluster would be affected.\n\nWe're still investigating this approach as there are some concerns about how search would work if we had separate Elasticsearch servers per group.\n\n## The future\n\nWe haven't given up yet! We have high hopes that we'll find ways to lower usage enough to make better search available to all our users.\n\nMeanwhile, we're switching all our engineering time from lowering index usage to improving administration capabilities, as we feel that\nenabling things like selective indexing of projects will allow us to improve our Elasticsearch integration with more confidence, as we will\nbe dogfooding our changes in production.\n\nIf you'd like to follow along with us, feel free to check out the following epics: [gitlab-org&153](https://gitlab.com/groups/gitlab-org/-/epics/153),\n[gitlab-org&429](https://gitlab.com/groups/gitlab-org/-/epics/429), and [gitlab-org&428](https://gitlab.com/groups/gitlab-org/-/epics/428).\nIf you have any concerns, comments, etc. we'll be glad to hear them. 
Remember, everyone can contribute!\n\nPhoto by [Benjamin Elliott](https://unsplash.com/photos/vc9u77c0LO4) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[677,231,9],{"slug":1960,"featured":6,"template":680},"enabling-global-search-elasticsearch-gitlab-com","content:en-us:blog:enabling-global-search-elasticsearch-gitlab-com.yml","Enabling Global Search Elasticsearch Gitlab Com","en-us/blog/enabling-global-search-elasticsearch-gitlab-com.yml","en-us/blog/enabling-global-search-elasticsearch-gitlab-com",{"_path":1966,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1967,"content":1973,"config":1979,"_id":1981,"_type":14,"title":1982,"_source":16,"_file":1983,"_stem":1984,"_extension":19},"/en-us/blog/enforcing-managing-2fa-support-security",{"title":1968,"description":1969,"ogTitle":1968,"ogDescription":1969,"noIndex":6,"ogImage":1970,"ogUrl":1971,"ogSiteName":667,"ogType":668,"canonicalUrls":1971,"schema":1972},"This is what happens if you lose access to your 2FA GitLab.com account","Support Engineering Manager Lyle Kozloff explains why we no longer accept government ID for two-factor authentication removal.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666816/Blog/Hero%20Images/security-cover.png","https://about.gitlab.com/blog/enforcing-managing-2fa-support-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"This is what happens if you lose access to your 2FA GitLab.com account\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lyle Kozloff\"}],\n        \"datePublished\": \"2018-10-08\",\n      }",{"title":1968,"description":1969,"authors":1974,"heroImage":1970,"date":1976,"body":1977,"category":299,"tags":1978},[1975],"Lyle Kozloff","2018-10-08","\nYou may have read my previous post about [how to keep your GitLab account safe and accessible](/blog/keeping-your-account-safe/). 
That came about because the Support Team recently changed how we verify your identity when you lose access to your GitLab.com account and request that the [two-factor authentication (2FA)](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html) be reset. This was a collaborative effort between our Support and Security teams, and I wanted to share our updated, more secure process.\n\nUp until recently, the procedure for regaining access to your account started with resetting with an [SSH key](https://docs.gitlab.com/ee/user/ssh.html). Many users didn't have one registered, so the standard fallback for proving your identity was to provide your government-issued ID for verification. This is fairly common, but has a couple of drawbacks:\n\n- Many GitLab users don't use their real names on their GitLab accounts, so \"@elitehacker,\" for example, would have a pretty hard time proving their identity that way.\n- Also, GitLab, unlike other companies, doesn't use an independent verification service to assess these IDs. I don't even know what an Illinois driver's license looks like, let alone one issued by a country I've never been to. So there's a risk that our team wouldn't be able to identify fraudulent IDs.\n\n## How we authenticate users without using government ID\n\nWith this in mind, we started discussing ways to authenticate users that didn't rely on government-issued ID. I chatted to [Westley](/company/team/#wvandenberg) on the Security team, and got some insight into different approaches he had seen when he previously worked at Amazon. This is what the process looks like now:\n\n### Step 1: Determine risk factor\n\nThe first step is to classify the data we're potentially granting access to if we reset 2FA. There's a vast difference in risk between effectively granting access to thousands of private repositories which look like they contain secret government data, and granting access to a handful of tutorials on Angular that are public. 
So we came up with four different classifications based on what a user would get access to if we reset their 2FA – you can check out [the first iteration of these in the discussion in the issue](https://gitlab.com/gitlab-com/security/issues/45). This is a peer-reviewed process, so there will always be another agent confirming that the classification looks appropriate.\n\n### Step 2: Pose authentication challenges\n\nTogether Westley and I came up with a series of challenges the Support Team can pose to users who have lost access, which require knowledge and familiarity with the user's account. These challenges are given scores, and depending on what classification your account is given, there will be a minimum score you need to attain in order for us to reset your 2FA. The set of challenges posed is selected by the agent handling the ticket, and it may differ each time.\n\nThere's no one, single factor that will get you into your account – the spirit is rather that you can build a body of evidence to verify your identity, rather than relying on one thing (which used to be the case with the government ID). If you succeed in the challenges, we will reset your 2FA so you can get back into your account.\n\nThese challenges aren't made public – we're not going to give away exactly what you need to access a 2FA account, obviously 😆 We'll keep [iterating](https://handbook.gitlab.com/handbook/values/#iteration) on them too.\n\nAs mentioned, this new workflow is really a result of collaboration between Support and Security. Having identified that our existing process was less than ideal, we asked for an audit of our proposal from Security, to get their stamp of approval and ensure that we were leveraging our internal resources to keep our users' accounts safe. 
You can [check out the issue for this consultation with Security here](https://gitlab.com/gitlab-com/security/issues/45) for the full discussion.\n\nTo avoid resetting your 2FA altogether, here's [how to keep your GitLab account safe and accessible](/blog/keeping-your-account-safe/).\n",[811,9,720],{"slug":1980,"featured":6,"template":680},"enforcing-managing-2fa-support-security","content:en-us:blog:enforcing-managing-2fa-support-security.yml","Enforcing Managing 2fa Support Security","en-us/blog/enforcing-managing-2fa-support-security.yml","en-us/blog/enforcing-managing-2fa-support-security",{"_path":1986,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1987,"content":1993,"config":1999,"_id":2001,"_type":14,"title":2002,"_source":16,"_file":2003,"_stem":2004,"_extension":19},"/en-us/blog/engineering-director-shadow",{"title":1988,"description":1989,"ogTitle":1988,"ogDescription":1989,"noIndex":6,"ogImage":1990,"ogUrl":1991,"ogSiteName":667,"ogType":668,"canonicalUrls":1991,"schema":1992},"The engineering director shadow experience at GitLab","Shadowing an engineering director at GitLab was an immersive, collaborative experience. 
Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667509/Blog/Hero%20Images/continuous-integration-from-jenkins-to-gitlab-using-docker.jpg","https://about.gitlab.com/blog/engineering-director-shadow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The engineering director shadow experience at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2022-04-01\",\n      }",{"title":1988,"description":1989,"authors":1994,"heroImage":1990,"date":1996,"body":1997,"category":299,"tags":1998},[1995],"William Arias","2022-04-01","\n\nMy [engineering director shadow](/handbook/engineering/development/shadow/director-shadow-program.html) experience reminded me of a concept that gained relevance during the pandemic:\n\n> \"I am because we are\" set in the context of the actual state of the world: I will be safe when all and each of us is safe.\n\nThe inspiration of these ideas stem from the [Ubuntu Philosophy](https://en.wikipedia.org/wiki/Ubuntu_philosophy) and, if seen from another angle, could mean:\n\n> \"Ubuntu implies that everyone has different skills and strengths; people are not isolated, and through mutual support, [they can help each other to complete themselves](https://www.linkedin.com/pulse/open-source-enlightenment-2015-part-1-audrey-tang/).\"\n\nDuring the shadowing experience, I realized that it is easy to get comfortable with my own world and department-specific view, which can be very foreign to other teams. The reality is that we are all interconnected and each bit of group success is **our** success. Is there an incident? A bug? A delay in hiring? 
It affects not only the department [DRI](/handbook/people-group/directly-responsible-individuals/) (what we call at GitLab the Directly Responsible Individual), but it can have an impact on all of us, and it can be disguised in different ways. A reliability challenge is not only an engineering problem; if there is an unresolved issue that goes on for too long, it can end up hurting GitLab’s reputation and brand. The issue can impact not only the goals of engineering but also of other teams, including marketing.\n\nTo navigate this interconnectedness, treat all individual efforts as a consolidated unit efficiently and transparently. This is one of my key takeaways from the shadowing program: Having a fair amount of humanity, humbleness, and people-oriented skills is important. I went into this program assuming I was going to experience mostly hard, deterministic skills but the reality was very different.\n\n## A day in the life\n\nShadowing [Wayne Haber](/company/team/#whaber), director of engineering for Growth, Sec, and Data Science, is a unique experience, especially for someone who doesn't spend a lot of time with upper leadership at GitLab. Wayne begins the shadow week with a prep coffee chat where he walks you through what to expect from the week, some tips, and his general criteria for success in the program (take notes and offer feedback!).\n\nAs the week kicks off, you'll first notice you'll be taking part in meetings, a lot of meetings. This is not a bad thing. However, you are going to be treated to a backstage pass to what mission-critical meetings at GitLab look like, how relationships are developed, how KPIs are decided and set, and much more.\n\nDuring my time with Wayne, I attended a variety of meetings from skip levels to a 1:1 with Wayne's boss. In those meetings there were a lot of nuances to observe and an opportunity to soak up how our engineering directors apply [the CREDIT values](https://handbook.gitlab.com/handbook/values/). 
Wayne encourages people who take part to get involved in the meetings, be vocal, be willing to engage, take notes, and offer feedback. This is an atmosphere that helps to cultivate a sense of \"No Ego\" and promotes collaboration.\n\n## TL;DR\n\nI totally recommend taking one week to enjoy Wayne's adventures. It is an enriching and humbling opportunity to connect with colleagues that you might not come across if you are on another team. As mentioned before, we impact each other more than we might usually think!\n",[722,9,810],{"slug":2000,"featured":6,"template":680},"engineering-director-shadow","content:en-us:blog:engineering-director-shadow.yml","Engineering Director Shadow","en-us/blog/engineering-director-shadow.yml","en-us/blog/engineering-director-shadow",{"_path":2006,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2007,"content":2013,"config":2019,"_id":2021,"_type":14,"title":2022,"_source":16,"_file":2023,"_stem":2024,"_extension":19},"/en-us/blog/engineering-managers-automate-their-jobs",{"title":2008,"description":2009,"ogTitle":2008,"ogDescription":2009,"noIndex":6,"ogImage":2010,"ogUrl":2011,"ogSiteName":667,"ogType":668,"canonicalUrls":2011,"schema":2012},"How GitLab automates engineering management","At GitLab we know automation is engineering's best friend. 
Here's a deep dive into three scripts we use regularly to keep big projects on track.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/engineering-managers-automate-their-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab automates engineering management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Seth Berger\"}],\n        \"datePublished\": \"2021-11-16\",\n      }",{"title":2008,"description":2009,"authors":2014,"heroImage":2010,"date":2016,"body":2017,"category":299,"tags":2018},[2015],"Seth Berger","2021-11-16","\n\nAs an engineer, figuring out how to automate your work becomes an important aspect of your job. From writing powerful dotfiles, to customizing bash scripts, to writing robust and rigorous tests, engineers regularly look for ways to automate their repetitive work. \n\nAt GitLab, engineering managers are no different and are constantly looking for ways to automate their work. I asked engineering managers at GitLab to share their automation scripts and their responses were overflowing. \n\nFrom automating their [1:1 document creation](https://www.youtube.com/watch?v=gqFbZi8Hyoc), to integrating [GitLab with Google Sheets](https://gitlab.com/-/snippets/2200407), to writing utilities to [provide executive summaries](https://gitlab.com/gitlab-org/secure/tools/report-scripts), GitLab team members take advantage of the [rich API that GitLab](https://docs.gitlab.com/ee/api/) provides to organize the mountains of information that they sort through on a regular basis. \n\nFor this blog post, I’m sharing a [repo](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries) that contains just a few of the many scripts that our team members use. These scripts were originally written by engineering manager [Rachel Nienaber](/company/team/#rnienaber). 
Rachel’s Infrastructure team is tasked with the exciting work of coordinating large scale infrastructure and code improvements. The work involves coordinating and sequencing lots of issues and epics, and ensuring the work gets done at just the right time and in the right order. Because of the breadth and scale of the work, she has created a handful of scripts that parse issues and epics in order to gain better visibility into the work that needs to be done. \n\nIn the repo, there are three scripts. I’ll provide a quick overview of the first two, and then dive into the code on the last one. \n\n* [Issues not in epics ](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/issues_not_in_epics.rb)\n* [Epic summary](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/epic_summary.rb)\n* [Epic/Issue relationship ](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/epic_issue_relationships.rb)\n\n**Issues not in epics**\n\nSince the Infrastructure team leans on [epics](https://docs.gitlab.com/ee/user/group/epics/) to organize their issues, they also want to be able to organize work that may not be part of an epic. The [`issues_not_in_epics.rb`](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/issues_not_in_epics.rb) script iterates through issues not in an epic and updates the description of a single hard-coded [issue](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/538) with a table summarizing those issues. The script is run on a daily basis via a scheduled pipeline. This ensures that issues do not slip through the cracks. \n\n**Epic summary**\n\nThis script, [`epic_summary.rb`](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/epic_summary.rb), was written to solve the problem of having to look in multiple places to understand the status of each project. 
By grouping all status information into one place it’s easy to see what the team is working on, and what projects will be coming up next. \n\nAs input it takes a designated epic ID and updates the description of that epic by crawling sub-epics and extracting the following data from those epics:\n\n* The person responsible for delivering a sub-epic (at GitLab we use the term [Directly Responsible Individual or DRI](/handbook/people-group/directly-responsible-individuals/))\n* The latest status update for the epic as inputted by an engineer in an epic description\n* The number of sub-epics\n* Links to a board showing the issues constituting that epic\n\nYou can see an example of the output from the script on this [epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/148).\n\nPart of what makes this script simple is that the Infrastructure team always updates the bottom of all their epic descriptions with the following markdown.\n\n```markdown\n## Status {DATE}\n{commentary of the status}\n```\n\nBy consistently using that very simple markdown, the following snippet of code can reliably extract the status for each epic:\n\n```rb\n if description!= nil && description.index(\"## Status\")\n\n    end_location = description.length\n\n    if description.index(\"mermaid\")\n      end_location = description.index(\"mermaid\")-6\n    end\n\n    status = description[description.index(\"## Status\")+10..end_location]\n  end\n```\n\nThe code above certainly won’t win any algorithm challenges, but that’s kind of the point and what we aim to do with [boring solutions](/blog/boring-solutions-faster-iteration/). \n\nYou’ll notice the code above adjusts what is parsed to exclude a mermaid diagram that might appear after the `## Status` markdown.  That diagram gets maintained with the [epic_issue_relationship.rb](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/epic_issue_relationships.rb) script. 
\n\n**Epic issue relationship**\n\nThis script updates either a specific epic or all epics, depending on the command line option,  with a [mermaid diagram](https://mermaid-js.github.io/) that shows the relationship between issues and the order that those issues need to be completed by examining how they are related to one another. Adding a mermaid diagram to the description was introduced by [Sean McGivern](/company/team/#smcgivern), a staff engineer on the Scalability team. It creates brilliant diagrams like this one from this [epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/579).\n\n![Mermaid Diagram](https://about.gitlab.com/images/blogimages/2021-11-16-engineering-managers/issue_relation.png)\n\nLet’s walk through the code.\n\nThe script uses the Docopt gem to parse and accept several input parameters. \n\n```rb\noptions = Docopt::docopt(docstring)\ntoken = options.fetch('--token')\ngroup_id = options.fetch('--groupid')\nepic_id = options.fetch('--epicid', nil)\ndry_run = options.fetch('--dry-run', false)\n```\nThen a connection to the GitLab instance is created, taking advantage of the [GitLab gem](https://github.com/NARKOZ/gitlab) which is extended in [`lib/gitlab_client/epics.rb`](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/main/lib/gitlab_client/epics.rb) to include a few extra methods. \n\n```rb\nGitlab.configure do |config|\n  config.endpoint = 'https://gitlab.com/api/v4'\n  config.private_token = token\nend\n```\n\nIf an epic id is passed in, then the `update_mermaid` will run only for a specific epic. Otherwise, the code searches for epics that match the two labels, `workflow-infra::In Progress` and `team::Scalability` and are also `opened`. Only when the matching epics do not have child epics,  is `update_mermaid` run. 
\n\n```rb\nif epic_id\n  update_mermaid(token: token, group_id: group_id, epic_id: epic_id, dry_run: dry_run)\nelse\n  Gitlab.epics(group_id, 'workflow-infra::In Progress,team::Scalability', options: { state: 'opened' }).each do |epic|\n    if Gitlab.epic_epics(epic['group_id'], epic['iid']).count == 0\n      update_mermaid(token: token, group_id: group_id, epic_id: epic['iid'], dry_run: dry_run)\n    end\n  end\nend\n```\nFinally the most exciting part of the script is the method `update_mermaid` method. \n\nBelow the code sets up variables, and looks to see if a mermaid diagram exists in the epic description that it should populate. Note, that if a mermaid diagram does not exist in the epic already, this script will not create one. Each epic should already have a mermaid diagram placeholder inserted after the status header.\n\n```rb\ndef update_mermaid(token:, group_id:, epic_id:, dry_run:)\n  in_epic = Set.new\n  from_relations = Set.new\n  relations = Set.new\n  mermaid = ['graph TD']\n  original_description = Gitlab.epic(group_id, epic_id).description\n\n  unless original_description =~ MERMAID_REGEX\n    puts \"#{epic_id} does not have a Mermaid diagram\"\n    return\n  end\n```\n\nNext the code iterates through each of the issues in the epic and assigns a graph_id for each issue that will be part of the mermaid diagram. It also adds the `key_fields` to the `in_epic` Set. The code assigns `title` along with an emoji so that the mermaid diagram is visually richer. After that the graph nodes are added to the mermaid diagram. 
\n\n```rb\n Gitlab.epic_issues(group_id, epic_id).each do |issue|\n    iid = issue['iid']\n    graph_id = id(issue)\n\n    in_epic \u003C\u003C key_fields(issue)\n\n    title = \"##{iid}\"\n    title = \"🎯 #{title}\" if issue['labels'].include?('exit criterion')\n    if issue['state'] == 'closed'\n      title = \"✅ #{title}\"\n    elsif issue['assignees'].any?\n      title = \"⏳ #{title}\"\n    end\n\n    mermaid \u003C\u003C \"  #{graph_id}[\\\"#{title}\\\"]\"\n    mermaid \u003C\u003C \"  click #{graph_id} \\\"#{issue['web_url']}\\\" \\\"#{issue['title'].gsub('\"', \"'\")}\\\"\"\n\n```\nAfter adding the graph nodes above, the code iterates through the links associated with each issue. The code determines if the issue is blocked by or blocks another issue. Knowing the direction of this relationship defines which direction the arrow in the mermaid diagram should point.  \n\nThe code also adds both the issue and link to the `from_relations` set, which will automatically deduplicate entries.\n\n```rb\n    Gitlab.issue_links(issue['project_id'], issue['iid']).each do |link|\n      case link['link_type']\n      when 'is_blocked_by'\n        source = id(link)\n        destination = graph_id\n      when 'blocks'\n        source = graph_id\n        destination = id(link)\n      else\n        next\n      end\n\n      from_relations \u003C\u003C key_fields(issue)\n      from_relations \u003C\u003C key_fields(link)\n\n      unless relations.include?([source, destination])\n        mermaid \u003C\u003C \"  #{source} --> #{destination}\"\n        relations \u003C\u003C [source, destination]\n      end\n    end\n```\n\nFinally, the code looks at the “extra” issues, which are issues that are not directly part of the epic, but are related to issues in the epic. These are the most important issues to ensure are on the diagram, since they represent issue dependencies that are outside the epic and would otherwise not show up when viewing an epic page in GitLab. 
\n\nThe code then updates the epic description by calling the GitLab API and setting the new description. \n\n```rb\n  (from_relations - in_epic).each do |extra_issue|\n    mermaid \u003C\u003C \"  #{id(extra_issue)}[\\\"❌ ##{extra_issue['iid']}\\\"]\"\n    mermaid \u003C\u003C \"  click #{id(extra_issue)} \\\"#{extra_issue['web_url']}\\\" \\\"#{extra_issue['title'].gsub('\"', \"'\")}\\\"\"\n  end\n\n  mermaid_string = mermaid.join(\"\\n\")\n  new_description = original_description\n                        .gsub(MERMAID_REGEX,\n                              \"\\n\\\\1\\n```mermaid\\n#{mermaid_string}\\n```\\n\")\n\n    Gitlab.edit_epic(group_id, epic_id, description: new_description)\nend\n```\n\nThe above scripts help engineering managers efficiently know about all the issues their team members are working on, the status of their team’s epics and how all the work fits together.  \n\nThe scripts only rely on team members doing two things manually: \n\n* Updating an epic’s status on a periodic basis\n* Creating relationships between related issues.  \n\nThe scripts can be run as part of a regular scheduled [pipeline](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/main/.gitlab-ci.yml). 
With the reports generated on a scheduled basis, engineering managers can regularly get summarized information that helps make them and their teams more productive.\n",[723,9,811],{"slug":2020,"featured":6,"template":680},"engineering-managers-automate-their-jobs","content:en-us:blog:engineering-managers-automate-their-jobs.yml","Engineering Managers Automate Their Jobs","en-us/blog/engineering-managers-automate-their-jobs.yml","en-us/blog/engineering-managers-automate-their-jobs",{"_path":2026,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2027,"content":2033,"config":2038,"_id":2040,"_type":14,"title":2041,"_source":16,"_file":2042,"_stem":2043,"_extension":19},"/en-us/blog/epics-roadmap",{"title":2028,"description":2029,"ogTitle":2028,"ogDescription":2029,"noIndex":6,"ogImage":2030,"ogUrl":2031,"ogSiteName":667,"ogType":668,"canonicalUrls":2031,"schema":2032},"Coming in 11.3: Seamless top-down and bottom-up planning with epics and roadmap","See how you can plan and track larger initiatives even more easily with milestone dates integrated into epics.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672146/Blog/Hero%20Images/epics-issues-milestones-planning.jpg","https://about.gitlab.com/blog/epics-roadmap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Coming in 11.3: Seamless top-down and bottom-up planning with epics and roadmap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2018-08-23\",\n      }",{"title":2028,"description":2029,"authors":2034,"heroImage":2030,"date":1515,"body":2036,"category":299,"tags":2037},[2035],"Victor Wu","\n\n[Epics](https://docs.gitlab.com/ee/user/group/epics/) and [roadmap](https://docs.gitlab.com/ee/user/group/roadmap/)\n are two newer features in [GitLab Ultimate](/pricing/) and [GitLab.com Gold](/pricing/#gitlab-com). 
Used together, your team\n can plan and track larger initiatives. On September 22, we're shipping a new feature\n which we will help you transition seamlessly between top-down and bottom-up planning.\n\n## First things first: epics vs. issues vs. roadmap\n\nAn epic is similar to an [issue](https://docs.gitlab.com/ee/user/project/issues/) in that it\nrecords a proposed scope of work to be done, allows for team members to discuss that scope,\nand then is tracked and updated over time as that work is actually implemented.\n\nHowever, an epic exists at the [group](https://docs.gitlab.com/ee/user/group/index.html) level (as opposed to an issue, which exists at the [project](https://docs.gitlab.com/ee/user/project/index.html) level). So\nimmediately you see that an epic is designed to reflect a larger scope, and higher level of discussion\ncompared to an issue. Additionally, you can [attach any number of issues to an epic](https://docs.gitlab.com/ee/api/epic_issues.html#assign-an-issue-to-the-epic), with the idea that\nthe epic's scope decomposes into those individual issues.\n\n![epic](https://about.gitlab.com/images/blogimages/epic-view.png){: .shadow.medium.center}\n\nSince an epic is designed to scope work over a longer period of time (several issues' worth),\na timeline-based view in the form of a [roadmap](https://docs.gitlab.com/ee/user/group/roadmap/)\n is also useful: it serves as a visualization to anticipate that work, and track it as it's\n progressively completed. 
So the roadmap, also scoped at the group level, presents all the\n epics in time for that group.\n\nYou can apply [group labels](https://docs.gitlab.com/ee/user/project/labels.html#project-labels-and-group-labels)\n to epics, making it easy to quickly narrow down to the epics you care about, whether you\n are looking at a list view or a roadmap view.\n\n| Epics list | Roadmap |\n| --- | --- |\n| ![roadmap](https://about.gitlab.com/images/blogimages/epic-list-view.png){: .shadow} | ![roadmap](https://about.gitlab.com/images/blogimages/roadmap-view.png){: .shadow} |\n\n## Long-term vs short-term planning\n\nWhen planning any initiative, uncertainty, by definition, increases further out in\nthe future. You don't know how many resources you will have. You don't know if previous\ndependent work will be finished. You don't know if the market and your customers will change\nsuch that you won't even need that planned out initiative at all.\n\nConversely, the nearer-term future is much more certain. You have a good handle of the work\nthat should be accomplished and that it can be completed within the next few weeks, up to a\nmonth or so.\n\nAnd of course, the work you are doing now, and have already completed in the past, has zero\nuncertainty. You can't change the past.\n\nEpics and roadmap help you plan and track work in all these cases:\n\n### Long-term future: top-down planning\n\nWhen planning far in the future, we use _top-down planning_. We have strategic initiatives\nthat we want to achieve, with approximate scope and timelines. So in this case, you would\ncreate an epic, and assign `Fixed` dates (a planned start date and planned finish date) to it.\nThe epic would appear in the roadmap view, and you would be able to see it positioned further\nin the future.\n\nThis helps high-level planning, such as starting discussions with various departments in\nyour organizations, or presenting a strategic roadmap to your executive leadership. 
By creating the\nepic early on, it provides a collaborative space for all stakeholders to discuss feasibility\nand further detailed ideas.\n\n### Short-term future: bottom-up planning\n\nWhen planning for the nearer-term future, we use _bottom-up_ planning. So suppose the epic\nyou created previously with fixed dates has gained some traction within your organization.\nPeople are excited about the prospects and want to flesh out detailed designs and implementation\nsteps. You and your team would then start creating issues and attach them to the epic.\n\nEventually, you have scoped out the detailed work in the issues and even assigned milestones to them,\nindicating when they are planned to be worked on. Now, instead of having to manually update the epic\nto reflect the milestone dates, you would simply choose `From milestones` in the epic sidebar. In this\ncase, the epic planned start date becomes a dynamic date reflecting the earliest start date across all\nthe epic's assigned milestones. The same goes for the epic's planned end date too.\n\nThis functionality is coming in GitLab 11.3 – you can [view the original issue here](https://gitlab.com/gitlab-org/gitlab-ee/issues/6470).\n\nAdditionally, the [roadmap bar edges will reflect the fixed or dynamic start and end dates](https://gitlab.com/gitlab-org/gitlab-ee/issues/6471) accordingly.\n\n![inherited-dates](https://about.gitlab.com/images/blogimages/inherited-dates.png){: .shadow.medium.center}\n\nSo with this design, you are in control when you want to seamlessly transition an epic from a\ntop-down planning scenario, to a bottom-up one. 
The roadmap reflects these dates automatically too,\nso that all your epics are shown together in one view.\n\nPhoto by [Christopher Machicoane-Hurtaud](https://unsplash.com/photos/ewZkOqjl2Ys?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/ewZkOqjl2Ys?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[831,677,9,723],{"slug":2039,"featured":6,"template":680},"epics-roadmap","content:en-us:blog:epics-roadmap.yml","Epics Roadmap","en-us/blog/epics-roadmap.yml","en-us/blog/epics-roadmap",{"_path":2045,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2046,"content":2052,"config":2058,"_id":2060,"_type":14,"title":2061,"_source":16,"_file":2062,"_stem":2063,"_extension":19},"/en-us/blog/evolution-of-zero-trust",{"title":2047,"description":2048,"ogTitle":2047,"ogDescription":2048,"noIndex":6,"ogImage":2049,"ogUrl":2050,"ogSiteName":667,"ogType":668,"canonicalUrls":2050,"schema":2051},"The evolution of Zero Trust","Zero Trust may be one of the hottest topics in security today, but it's not exactly new. Here's a history.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664096/Blog/Hero%20Images/evolution-of-zero-trust.jpg","https://about.gitlab.com/blog/evolution-of-zero-trust","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The evolution of Zero Trust\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-04-01\",\n      }",{"title":2047,"description":2048,"authors":2053,"heroImage":2049,"date":2054,"body":2055,"category":720,"tags":2056},[1574],"2019-04-01","\nUpdate: This is part 1 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). 
See our next post: [Zero Trust at GitLab: Problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges/).\n{: .alert .alert-info .note}\n\nI was not at the 2019 [RSA Conference](https://about.gitlab.com/events/rsa/) this year, so I asked my friends and colleagues what it was like and if they enjoyed themselves.\nNearly every person mentioned the phrase \"Zero Trust Networking\" during their recounting of events, and the vast majority of them seemed worn down with the phrase by the end of the conference.\nSeveral mentioned it was the \"hot topic\" – the term ‘Zero Trust’ actually made the RSAC Buzzwords Top 3 list.\nI have a few thoughts on the subject, because it is a solid way to move forward in the security realm, but I also wanted to remind people that this is not some new thing that came up this year – this is a concept whose roots stretch back a few decades.\nI also wanted to point out that Zero Trust will not end attacks, as attacks never end.\n\nThis is the first of a series of blog posts from the security team here at GitLab explaining Zero Trust and how we are tackling it.\nBut for these discussions to make sense, we need to show some perspective, so first, a bit of a history lesson.\nThere were three major shifts that brought about Zero Trust, all building upon each other.\nLet’s cover them, one by one.\n\n## First shift: Death of the perimeter\n\nBack in the early days of the internet, if you wanted to attack a target network, you would do a bit of reconnaissance and discover things like hostnames and IP ranges.\nYou would probe, find the available services on these target hosts, then begin trying to compromise them.\nThis was because the individual host systems were fairly wide open.\nSystem administrators needed a way to limit access to the servers and workstations under their control, while allowing legitimate access to users. 
Remote workers were rare, as the bulk of users were in an office building together.\nSo the [network firewall](https://en.wikipedia.org/wiki/Firewall_(computing)) was born in the early 1990s, restricting access between an organization’s internal network and the internet.\n\nAttackers were accustomed to [port scanning](https://en.wikipedia.org/wiki/Port_scanner) the target, finding the various services, and taking their pick of which service to attack.\nTo adapt to the newly installed firewall, attackers began to focus on the services that were allowed through the firewall. Back then, [organizations still controlled their own servers](https://en.wikipedia.org/wiki/DMZ_(computing)), running things like DNS, email, and web services.\nThese types of common services required holes be punched in the firewall to allow legitimate traffic to them, and so the attackers simply came in with the legitimate traffic.\n\nAt the same time, desktop operating systems and corporate applications began to move toward interacting and sharing information with each other, and as system administrators felt a level of control with the firewall, no one really pushed back very hard against these various operating systems and their noisy applications.\nIn fact, using those same firewall rules, it was possible to allow customers, business partners, and vendors a bit more access to the precious internal network by creating large holes to allow the access.\nThis meant if the attacker could figure out who your trusted partners were, they could compromise them and then come in through the large hole created for those same trusted partners.\n\n>This meant if the attacker could figure out who your trusted partners were, they could compromise them and then come in through the large hole created for those same trusted partners.\n\nIt became common knowledge that once an attacker got a foothold into that internal network, it was usually quite easy to move about within the organization.\nThe attackers 
adapted. The firewall lost a lot of its value, and to many attackers it became meaningless.\n\nI remember meeting [Bill Cheswick](https://en.wikipedia.org/wiki/William_Cheswick) (one of those early pioneers that helped bring about the firewall) at a security conference, and I was able to corner him and talk shop.\nSomething both of us gravitated towards was this concept of how the infamous \"network perimeter\" was basically an illusion.\nIt _could_ work, but not without changing a serious amount of tech to make it happen.\nHow did each of us secure our respective home systems?\n[Hardening each system individually](https://en.wikipedia.org/wiki/Bastion_host), and just eliminating the concept of the perimeter.\nSure, we both kept a perimeter, but it was maintained with a few router rules, and was more like a white picket fence than a castle wall. To us, the network perimeter was dead.\n\n>Sure, we kept a perimeter, but it was more like a white picket fence than a castle wall. To us, the network perimeter was dead.\n\nThis was a common topic among security practitioners and network administrators at the time, all of us discussing and arguing the fine points the same way Cheswick and I did.\nWe needed some way to deal with the attacker since the perimeter was dead or dying.\nThe concept of Zero Trust networking was born.\nThis started as rumblings during the early 2000s and came into an actual concept of sorts through the [Jericho Forums](https://en.wikipedia.org/wiki/Jericho_Forum) in 2004, and by 2010 or so it even had a name.\nBut I am getting ahead of myself. 
Other things were happening.\n\n## Second shift: The cloud\n\nGetting [slashdotted](https://en.wikipedia.org/wiki/Slashdot_effect).\n[Distributed denial of service attacks](https://en.wikipedia.org/wiki/Denial-of-service_attack).\nJust not having the bandwidth on your internet-connected web server in your data center to handle the traffic.\nThis internet thing was really taking off, and the World Wide Web was driving it.\n A few companies figured out clever ways to provide server services for organizations all over the globe, and were known as [Content Distribution Networks](https://en.wikipedia.org/wiki/Content_delivery_network) (CDNs), and CDNs gave these organizations a way to upload web content to these servers.\n Even though content might be replicated across the CDN’s dozens of data centers world wide, it was one single entity as far as a typical website visitor was concerned.\n\nNot only could you upload your corporate web server to the CDNs, after a while you could basically pay for virtual servers that you could use for any purpose.\nAs web servers developed and web apps become more ambitious, some companies offered up their services to other companies, some even broke out of the \"web app\" mold and began to offer robust services that replaced desktop applications.\n\n### [The cloud](https://en.wikipedia.org/wiki/Cloud_computing) had arrived.\n\nNot everyone liked the cloud, in fact many organizations were quite resistant to it at first. Others immediately saw the value in it and moved everything to the cloud.\n\nAttackers did what they did best: they adapted. 
People new to the cloud would often get permissions wrong and expose sensitive data.\nAny bad coding practices they had before the cloud were just uploaded anyway as the cloud didn’t magically fix bugs.\nMoving poorly-coded services in the cloud meant even more holes in firewalls if old legacy data was still stored “on prem”.\nHowever, more often than not it meant these services and the insecure methods used to reach its data was simply moved up to the cloud, sometimes with even more exposure.\nAttackers got to know how these new technologies worked and understood the flaws that existed in the implementations and kept on compromising systems.\n\nWhile the cloud shift created its fair share of upheaval,  it certainly set the stage for the third major shift.\n\n## Third shift: Mobility\n\nWorking remotely? We'd had dial up networking via modem at first, followed by the infamous VPN.\nAs one might imagine, this was an obvious one that certainly bypassed a firewall on a network perimeter. Knowing usernames and passwords had always been a goal of attackers, and if they managed to obtain that information they could certainly plug it into a VPN for access.\n\n### To help protect the username and password, [Two Factor Authentication](https://en.wikipedia.org/wiki/Multi-factor_authentication) (2FA) came about.\n\nThe infamous RSA token was technology I encountered ages ago, and it was certainly all the rage during the first decade of this century.\nMy first encounter was when using a VPN in the late '90s.\nA decade later when I worked for MITRE, I carried no fewer than four RSA tokens (not unheard of at the time for many organizations!) 
for not just remote access, but for special access to projects funded by different government agencies.\nYou were outside that perimeter and needed in, but as users and their passwords were considered a security risk for any number of reasons (poor password hygiene, easily-fooled help desk personnel responsible for resets, etc.), this direct and open exposure of the internal network via the VPN was too insecure.\nSomething you know (the password) and something you have (that RSA token with its changing six-digit number) made it way more difficult for attackers to get in.\n\nOver 20 years ago, everyone had a desktop machine, but those road warriors that travelled for business would be issued a second system – a laptop.\nThis shifted as it made sense to give all of the employees laptops, and the more expensive desktop systems were only issued by those doing specific jobs that required the extra desktop horsepower.\n\nThe phone also helped push forward the mobility concept, as it expanded from a telephone with internet access to a small internet-connected computer loaded with cloud-based apps that also works as a telephone.\n\n### We became mobile.\n\nEither through SMS messaging, an \"authentication app\" that did TOTP, or a full-fledged 2FA app that supported push technology, the phone became the \"something you have\" and essentially killed the old RSA token.\nAnd of course something else happened with all this mobility, it increased the ability for one to work from anywhere.\nMost of those \"Whatever as a Service\" apps were using web-based protocols to communicate to their Cloud presence, and we'd figured out how to log a person in and do 2FA ages ago.\nThere was no need for a perimeter for the basic end user in an organization.\n\nThis was a slow build to a large upheaval in information security.\nBut what really drove home the big security issues of this brave new world was an event.\nThe culmination of our three major shifts – a teaching moment, as they 
say.\n\n## The big teaching moment\n\nWhat was the big teaching moment?\n\n### The obvious answer everyone talks about is [Operation Aurora](https://en.wikipedia.org/wiki/Operation_Aurora).\n\nThis was the breach at Google that got them to take a look at this whole Zero Trust thing, build their version of it called [BeyondCorp](https://cloud.google.com/beyondcorp/), and begin to implement it internally.\nIn 2014 Google began to publish information about it.\n\nGoogle had been targeted by [PLA Unit 61398](https://en.wikipedia.org/wiki/PLA_Unit_61398).\nI recognized PLA Unit 61398 from my defense contractor days as “Comment Crew,” as one of their backdoor programs that would make innocent-looking web queries to a Comment-Crew-controlled web server, and obfuscated comments in the HTML returned to the backdoor were actually commands for the backdoor to carry out.\nThey targeted a lot of organizations from large corporations to defense contractors to U.S. government agencies.\n\nThe press at the time had a lot of quotes from security experts pooh-poohing the whole [Advanced Persistent Threat](https://www.fireeye.com/current-threats/apt-groups.html) (APT) thing, claiming that APT attacks weren’t sophisticated as the \"advanced\" part of APT implied.\nHowever, most of these people had either never played offense, or they didn't deal with APT as a part of daily life.\nI distinctly remember the Google attack because during that same timeframe, Comment Crew’s attack was repeated against my employer and others. 
We were not breached in that case and we probably called it “a typical Tuesday,” but many naysayers in the security community finally had to admit that APT was in fact real.\n\n### But a _huge_ teaching moment was the [RSA hack in 2011](https://www.wired.com/2011/08/how-rsa-got-hacked/).\n\nAgain, maybe not the most sophisticated of attacks to gain entry ([phishing](https://en.wikipedia.org/wiki/Phishing) email), but it was just enough to gain a foothold.\nOnce inside, they pivoted and managed to compromise RSA in what was one of the worst ways possible.\nPeople argue about exactly what level of compromise they achieved, but in the end the attackers could program up their own tokens to allow bypass of RSA SecurID implementations at RSA customer locations.\n\nOne important point to make here – 2FA was an extremely important protection mechanism for organizations like the U.S. Government and all of its many defense contractors.\nAPT actors targeted things like documents pertaining to research, plans involving various defense technologies, and credentials for regaining access if their intrusion was discovered and the APT actors were shut out.\nSince those credentials were protected by 2FA via RSA SecurID tokens, complete panic ensued. _Millions_ of tokens had to be manufactured, provisioned, and deployed to customers who had to configure their systems and deploy them internally.\nDuring this time all organizations still had to function, and APT-sponsored attacks against targets that took advantage of the stolen RSA technology began to appear.\n\nThe basic corporate network at the time was still mainly perimeter-based, even though their perimeter was full of holes, allowing everything from remote users to trusted vendors, partners, and customers.\n\n> The cloud was there, but many companies had their feet in both worlds.\n\nThe cloud was there, but many companies had their feet in both worlds. 
They would often make architectural choices on technology based upon getting systems to just talk to each other and allow data access _without_ fully considering security issues.\nThe user population was increasingly mobile and, by its very nature, was pushing solutions to the absolute limit.\nAnd now, the one thing that at least protected access to it all – a layered security approach to credentials – was compromised.\n\n## Enter Zero Trust\n\nBeyondCorp was Google’s answer to the threat they faced – a sophisticated adversary that took advantage of their employees and gained privileged access to sensitive assets.\nGoogle published a lot of the material they developed, thinking it would help others deal with the same situation.\nFor those of us in the more threatened world of government agencies and government contractors, we didn’t give Google’s BeyondCorp a second thought.\nWe had defenses, we’d learned how to deal with these type of attackers, we’d even dealt with Comment Crew ourselves and could keep them at bay.\n\nThe RSA breach was a different scenario. 
An area of trust – 2FA – was completely compromised.\nRSA didn’t run out and build BeyondCorp, but it certainly inspired a large number of people to start looking for answers, and Zero Trust really began to check many of the boxes to add in the protections we needed.\nIn essence, the RSA event gave us a reason to implement Zero Trust.\nWe needed more than 2FA, more than inventory control, more than patch management, we needed to be able to establish a trusted environment and could not with the way things were.\n\n### Essentially, it boils down to this: Zero Trust assumes you do not trust the user nor the user’s device.\n\nThe user has to prove that they are who they say they are and that they meet policy requirements to perform the actions they are wanting to perform.\nThe device has to prove that it is what is says it is, including patch levels.\nEven automated processes such as systems that communicate between each other have to prove themselves as well.\nThe transaction should be valid and the processes are allowed to perform the actions they are performing.\nThis means any information in transition needs to be encrypted using secure algorithms, all transactions are signed and signatures validated, and there is a secure audit trail to ensure all parts of the operation can be examined.\n\n### Are we there yet with Zero Trust?\n\nNo. In fact, the hard part isn’t so much the implementation of it, it is getting it implemented everywhere. 
Most Zero Trust solutions address a lot of the concerns of the past, but they are not perfect by any means.\nMany organizations will be living in “mixed” environments of old and new for quite a while.\nThe applications that implement the raw components of Zero Trust need to be secure.\nThere will be various policy decisions on how to act on various accesses and requests involving users, devices, services, and data that if not properly defined could result in the wrong employee gaining access to sensitive material.\nAnd of course we will always face a clever adversary trying to bypass, break, and compromise whatever security controls are put in place.\n\nAt least with Zero Trust, we have a leg up. In the forthcoming [series of blog posts](/blog/tags.html#zero-trust), we’ll share GitLab’s story with Zero Trust.\nGitLab is a cloud native, all-remote company with employees from more than 50 countries.\nWe also strive to be as open as we can be about how we work.\n\nWe invite you to follow our journey and contribute your thoughts, questions and experiences around Zero Trust along the way.\n\nPhoto by [Matthew Henry](https://unsplash.com/photos/fPxOowbR6ls?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/security?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,720,2057],"zero trust",{"slug":2059,"featured":6,"template":680},"evolution-of-zero-trust","content:en-us:blog:evolution-of-zero-trust.yml","Evolution Of Zero 
Trust","en-us/blog/evolution-of-zero-trust.yml","en-us/blog/evolution-of-zero-trust",{"_path":2065,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2066,"content":2072,"config":2080,"_id":2082,"_type":14,"title":2083,"_source":16,"_file":2084,"_stem":2085,"_extension":19},"/en-us/blog/five-fast-facts-about-docs-as-code-at-gitlab",{"title":2067,"description":2068,"ogTitle":2067,"ogDescription":2068,"noIndex":6,"ogImage":2069,"ogUrl":2070,"ogSiteName":667,"ogType":668,"canonicalUrls":2070,"schema":2071},"Five fast facts about docs as code at GitLab","Here are five fast facts about how GitLab technical writers use GitLab in a docs-as-code workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660257/Blog/Hero%20Images/pen.jpg","https://about.gitlab.com/blog/five-fast-facts-about-docs-as-code-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Five fast facts about docs as code at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suzanne Selhorn\"},{\"@type\":\"Person\",\"name\":\"Susan Tacker\"},{\"@type\":\"Person\",\"name\":\"Diana Logan\"}],\n        \"datePublished\": \"2022-10-12\",\n      }",{"title":2067,"description":2068,"authors":2073,"heroImage":2069,"date":2077,"body":2078,"category":787,"tags":2079},[2074,2075,2076],"Suzanne Selhorn","Susan Tacker","Diana Logan","2022-10-12","\n\nAt GitLab, we use GitLab as our single platform to document GitLab by using a “docs-as-code” workflow. Sound confusing? \n\nThe GitLab technical writing team uses GitLab to plan, create, review, edit, and publish the [GitLab documentation](http://docs.gitlab.com). 
And because we use the docs-as-code workflow, we can produce a large amount of content with a small, passionate, efficient team.\n\nIf you aren’t familiar with docs as code, here’s a quick definition: \n\n[Docs as code](https://idratherbewriting.com/trends/trends-to-follow-or-forget-docs-as-code.html#what-is-docs-as-code) is a way to develop and publish product documentation. It uses the same tools and processes as software code development, placing the documentation files along with the code files in a repository for version control. \n\nIf you are wondering whether your organization could adopt a docs-as-code workflow in GitLab, read on for five fast facts that help explain how our team does it.\n\n## We use GitLab to plan both GitLab features and docs content updates\n\nOur product managers, UX designers, engineers, and quality assurance teams work together to plan our feature work. Maybe when you’re planning releases, you use a Kanban board, or you create issues in a third-party tool.\n\nAt GitLab, we use epics and [issues](https://gitlab.com/gitlab-org/technical-writing/-/issues/680) to plan our work, and [issue boards](https://gitlab.com/groups/gitlab-org/-/boards/4340643?label_name%5B%5D=Category%3ADocs%20Site) to track our progress. We value transparency, so all of this information is available to everyone, including discussions about planning. The tech writing team has visibility into the status of development at any time.\n\n![planning issue](https://about.gitlab.com/images/blogimages/planning_issue.png)\n\nIf we have larger doc efforts, we track them in GitLab, make the changes by using GitLab, and mark issues as done in GitLab. If a year passes and we want to remember why we made a change, we search GitLab and find who made the change and why. If you’re working in many different tools right now, imagine what it would be like to view everything in one place. Everything feels faster and more efficient. 
You skip the time you’d normally spend going through emails and websites and Slack to find lost discussions. It’s all here in GitLab.\n\nAnd if you love your wiki and don’t want to go without it, we have a wiki feature too.\n\n## We use GitLab to give and receive feedback on the docs\n\nIf you’ve been a writer for any amount of time, you know what a pain it can be to get people to review your content.\n\nAt GitLab, our developers write the first draft of content for all our new features. They save the content in the same repository as their code. Feature documentation is part of our development “definition of done.” They assign the draft content to our writers, who review it, add suggestions, and send their ideas and edits back to the authors.\n\nThe writers themselves also open merge requests (MRs) for content changes. And no matter who opens the MR (the writer, a developer, a support engineer, a community contributor), we all have the ability to easily comment on each other’s work.\n\nIn a merge request, it’s as simple as selecting a Suggestion button. You can comment on one line or several. You can provide changes or edits, and the person who authored the merge request can easily apply your change, or create their own competing suggestion, and you can discuss it. To invite others to the conversation, you can type their username in a comment, and they see your comment as a to-do item in GitLab. In this way, you can discuss any change. It’s transparent and inclusive.\n\n![making a suggestion](https://about.gitlab.com/images/blogimages/suggestion.png)\n\nBecause the doc content is in markdown, which is similar to plain text, it’s easy to view the differences between file versions, and to see who committed which change.\n\nMaybe you’ve worked in places where reviews were done in PDFs, or Word docs, or Google docs with comments. When you try this workflow, you'll see how much more efficient the process is. No one is passing around outdated versions of documents. 
No one is making updates that inadvertently wipe out someone else’s comments.\n\nAnd if anyone ever wants to know why we made a change, it’s easy to view the history of the page or even view who is to “blame” for a specific line. \n\n![who to blame?](https://about.gitlab.com/images/blogimages/blame.png)\n\nYou don’t have to store versions of a PDF document and try to search for who suggested which change. It’s all in GitLab.\n\n## We use GitLab to preview the docs content\n\nAt GitLab, we have tools to generate the docs site content locally, but you can also easily share a view of the docs site right from a merge request. If you’re playing with an idea and you want to show someone, you open a merge request, generate what we call “a review app” and voila, the changed docs site is available at a publicly available URL.\n\n![the review app](https://about.gitlab.com/images/blogimages/view_app.png)\n\nYour changes are visible, and you can iterate on them or commit as-is. Which brings us to another one of the most useful features we have at GitLab.\n\n## We use GitLab to test every content change\n\nMaybe you’re using a third-party tool to test the links in your docs, or to check spelling and grammar rules.\n\nWe are using third-party tools (Nanoc for links, Vale for spelling and grammar), but like everything else, these tools can be incorporated into GitLab, and into the writer workflow.\n\nEach writer has our tools installed locally and can view everything, from the document’s reading level to passive and active voice fixes on their local machine. But for those contributors who don’t have the toolset, we run a version of our tests in a pipeline as part of every commit.\n\n![a lint error](https://about.gitlab.com/images/blogimages/lint_error_2.png)\n\nIf you’re a developer and you don’t consider yourself to be an expert writer, you might find that the pipeline failed on your merge request because of an important grammar or branding rule. 
We’ve defined a list of many rules, and assigned levels of importance to them. So not only do we have a [style guide](https://docs.gitlab.com/ee/development/documentation/styleguide/) and [word list](https://docs.gitlab.com/ee/development/documentation/styleguide/word_list.html), but we also run tests to ensure our content doesn’t stray too far from those rules.\n\n## We use GitLab to generate the HTML output and we host the output on GitLab Pages\n\nOur CI/CD pipeline converts our markdown content and compiles it into HTML. Then we host this output on GitLab Pages, at the [docs.gitlab.com](http://docs.gitlab.com) website.\n\n![the pipeline](https://about.gitlab.com/images/blogimages/pipeline2.png)\n\nHaving the output generated by a pipeline means that we can update the docs site whenever we want. While the product is released once a month, we update the docs site once every hour. That means docs.gitlab.com always contains the most up-to-date content available, sometimes even pre-release information. Since the development planning and implementation issues are typically open to the public as part of our transparency value, pre-announcing features isn’t an issue. \n\nSo as you can see, for a multitude of reasons, we love our docs-as-code workflow. It can be an adjustment to transition to one tool for all of your doc needs, but GitLab supports the full writer workflow, no matter who writes your content. And we know, because we’ve been using it for years. 
\n\nLearn more about the tech writing docs-as-code work at GitLab:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZlabtdA-gZE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nTo learn more about contributing to our open source documentation, check out our instructions in “[How to update the docs](https://docs.gitlab.com/ee/development/documentation/workflow.html#how-to-update-the-docs).” We welcome your contributions!\n",[810,767,9],{"slug":2081,"featured":6,"template":680},"five-fast-facts-about-docs-as-code-at-gitlab","content:en-us:blog:five-fast-facts-about-docs-as-code-at-gitlab.yml","Five Fast Facts About Docs As Code At Gitlab","en-us/blog/five-fast-facts-about-docs-as-code-at-gitlab.yml","en-us/blog/five-fast-facts-about-docs-as-code-at-gitlab",{"_path":2087,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2088,"content":2094,"config":2102,"_id":2104,"_type":14,"title":2105,"_source":16,"_file":2106,"_stem":2107,"_extension":19},"/en-us/blog/five-signs-you-should-think-bigger",{"title":2089,"description":2090,"ogTitle":2089,"ogDescription":2090,"noIndex":6,"ogImage":2091,"ogUrl":2092,"ogSiteName":667,"ogType":668,"canonicalUrls":2092,"schema":2093},"Five signs you should think BIGGER!","Are you a designer who is frustrated with only focusing on the next milestone? Do you feel like you have to answer too many questions in every Issue? Do you feel like your product is not making any progress? 
**Time to Think Bigger!**","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099620/Blog/Hero%20Images/Blog/Hero%20Images/insights_insights.png_1750099620265.png","https://about.gitlab.com/blog/five-signs-you-should-think-bigger","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Five signs you should think BIGGER!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Iain Camacho\"}],\n        \"datePublished\": \"2021-03-30\",\n      }",{"title":2089,"description":2090,"authors":2095,"heroImage":2091,"date":2097,"body":2098,"category":2099,"tags":2100},[2096],"Iain Camacho","2021-03-30","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nAs a designer, it’s difficult to balance the scale of initiatives: Design too small, and nobody is excited or can understand the direction things are going. Start too big and everyone on the team may be too intimidated to start. ThinkBIG is a way of utilizing designers’ natural skillset to balance the iterative nature of engineering with the visionary nature of design. \n\nHere are 5 signals that you should switch up your style and Think Bigger: \n\n### 1) Every milestone is spent only prepping the next\n\n#### Signal\n\nWe’ve all been there. The next milestone planning issue is starting to get filled out and you, the designer, are realizing how many issues need design in order to be ready. As the priorities shift, you know the last two weeks of this milestone will be spent desperately trying to design mockups for engineers to start working on days later. I like to call this “Feeding the sharks”. It describes a certain level of panic some designers feel every milestone: If I don’t deliver enough, I might get chomped! \n\n#### Solution\n\nThinkBIG focuses on creating a larger-scale vision that can be iterated on as we go. This means that each design you put together leads to many independent issues engineers can work on. 
For a designer, this increases [results](https://handbook.gitlab.com/handbook/values/#results) by delivering one design worth many issues. \n\n### 2) Engineers are asking _a lot_ of questions\n\n#### Signal\n\nHave you ever started a new milestone and as engineers get started, they have a million questions detailing every possible state, permutation, and example that they should account for? This line of questioning means you, the designer, now need to make a myriad of new designs with only minute changes between them. This is not an [efficient](https://handbook.gitlab.com/handbook/values/#efficiency) use of the designer’s time. \n\n#### Solution\n\nFirst off, all these questions are valid and decisions that need to be made. By Thinking Bigger, engineers are better prepared to handle all the edge cases independently because they walk into their work with a fuller context of the impact on users.  This enables empathy-driven engineering, allowing engineers to lead the conversation around edge-cases with solutions in mind, instead of needing it to be defined ahead of time. By pushing the edge cases further down the product development lifecycle, there is also a unique opportunity for product, design, and engineering to [collaborate](https://handbook.gitlab.com/handbook/values/#collaboration) on delivering value to customers while still working iteratively.\n\n### 3) Nobody agrees on what the “MVC” actually is\n\n#### Signal\n\nPicture it: You’ve worked hard for weeks refining and distilling a big feature ask into a nicely designed MVC. It’s small, delivers value, and is beautiful to boot! You’ve convinced your PM to prioritize this beautiful little gem and it’s going onto the planning board. Everything feels amazing until… devastation!\n\nAfter engineering looked at it, they came back and said it was too large and would need to be broken down further. 
Now you’re at the end of your milestone and you’re swiftly picking away at your beautiful design into a shallow imitation of its former glory. \n\n#### Solution\n\nHowever, there is a simple way to keep this from happening: “[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is a team sport”. The designer shouldn’t be the only person on the team compromising for the sake of MVC. With ThinkBIG, you have multiple chances to bring engineering into the fold early and with the full vision in mind. This means devs are part of the conversation from the start, able to craft a valuable iteration and your designs become the conversation piece of deciding “What can we do next to deliver an amazing experience to our customers?”\n\n### 4) We’re working so hard but not getting anywhere\n\n#### Signal\n\nWorking iteratively is incredibly powerful and at GitLab, we can see the value of an iterative approach. We’re able to change our priorities at a moment’s notice and the work we actually have to deliver is reasonable and manageable while continuously delivering new value to customers. There is, however, a small drawback: When you’re only focusing on the step immediately in front of you, it’s easy to get lost along the way.\n\n#### Solution\nAs a designer, we have a unique opportunity to be the navigator for our teams. Using the ThinkBIG model, designers are empowered to hold responsibility for the Vision. From here, the Product Manager/Product Designer relationship becomes a balance between the vision and the strategy. Designs based on the large vision are used to keep the team on track for hitting the targets that bring value to customers while allowing for collaboration with the rest of the team on what tiny steps we take to get there.\n\n### 5) Engineers are reworking a lot\n\n#### Signal\n\nMy engineer and I are excited to work on a new effort. I’ve designed the first iteration and successfully passed it to them.  
While they’re building, I’m working on the design for the next iteration. A few weeks later the new changes are merged, the next iteration designs are ready, and customers are already seeing value. Your engineer looks at the next iteration and painfully mutters “Well, I’ll have to rewrite what I wrote the last milestone to account for this.”\n\n#### Solution\n\nIn a highly iterative development lifecycle, it’s not uncommon to have to rework things as the product evolves. However, it shouldn’t be happening every time. With ThinkBIG, engineers are informed of the long-term goal as well as the short-term MVC iteration. This extra context allows them to deliver the iteration while architecting their code in an informed way of where it will go.\n\n### Start Thinking BIGGER!\n\nAre some of these signals sounding familiar? Then switching your design style to ThinkBIG may be for you! The simplest way to make this change is to move iteration breakdown to **after** the design phase. It immediately shows engineers where we want to go as a product or feature, opens the implementation breakdown (MVC) conversation to the whole team, and provides incredibly valuable insight to everyone on the team. This model of working helps designers be more efficient, deliver results, and foster a tight collaboration with the broader team. To see this process in action, check out a [Package ThinkBIG around the dependency proxy design and research](https://www.youtube.com/watch?v=LXFu6oDxhsw). 
For more information, check out the GitLab Handbook on [ThinkBIG](https://about.gitlab.com/handbook/product/ux/thinkbig/) to learn more.\n","Unfiltered",[811,1698,9,832,2101],"AWS",{"slug":2103,"featured":6,"template":680},"five-signs-you-should-think-bigger","content:en-us:blog:five-signs-you-should-think-bigger.yml","Five Signs You Should Think Bigger","en-us/blog/five-signs-you-should-think-bigger.yml","en-us/blog/five-signs-you-should-think-bigger",{"_path":2109,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2110,"content":2116,"config":2121,"_id":2123,"_type":14,"title":2124,"_source":16,"_file":2125,"_stem":2126,"_extension":19},"/en-us/blog/five-things-you-hear-from-gitlab-ceo",{"title":2111,"description":2112,"ogTitle":2111,"ogDescription":2112,"noIndex":6,"ogImage":2113,"ogUrl":2114,"ogSiteName":667,"ogType":668,"canonicalUrls":2114,"schema":2115},"5 Things you might hear when meeting with GitLab's CEO","After two weeks shadowing our CEO, I can share the hottest topics on his mind right now.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670738/Blog/Hero%20Images/coghlanshadow.jpg","https://about.gitlab.com/blog/five-things-you-hear-from-gitlab-ceo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Things you might hear when meeting with GitLab's CEO\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Coghlan\"}],\n        \"datePublished\": \"2019-06-28\",\n      }",{"title":2111,"description":2112,"authors":2117,"heroImage":2113,"date":2118,"body":2119,"category":808,"tags":2120},[1816],"2019-06-28","\n\nDuring my two-week rotation in GitLab’s [CEO shadow program](/handbook/ceo/shadow/) I noticed something: Being a CEO involves a lot of repetition. 
Whether meeting with his executive team, board members, public or private market investors, candidates for [open roles](/jobs/), or journalists, our CEO [Sid Sijbrandij](/company/team/#sytses) had to repeat himself – a lot.\n\nThis shouldn’t be a surprise. I’ve read [articles](https://www.mckinsey.com/business-functions/organization/our-insights/the-ceos-role-in-leading-transformation) about the [importance of repetition](https://getlighthouse.com/blog/power-of-repetition-successful-leaders/) for leaders. My job can be pretty repetitive, too. I'm constantly planning meetups and explaining my role and the programs I manage to people throughout the wider GitLab community. And yet, given Sid’s position in GitLab and his desire to pursue “interestingness” (a Sid-ism I heard often), I was still surprised the 10th time I heard him tell the story of [how GitLab was founded](https://www.youtube.com/watch?v=CZ07wk3t31g&feature=youtu.be&t=135).\n\nI want to highlight a few of the other common themes, topics, and questions that came up repeatedly throughout my time in the CEO shadow program – to both share some insight with our community and inform folks who will be meeting with Sid about what to expect.\n\n## 1. \"We don't have any offices\"\n\nGitLab’s all-remote culture is a popular topic right now. It came up frequently in conversations with potential investors, candidates for executive positions, and journalists. People were curious to learn how we make it work at our scale and how we replicate the serendipitous moments that occur among co-located teams. 
Sid typically relies on explanations of our [handbook](/handbook/), [breakout calls](/handbook/communication/#breakout-call), [coffee chats](/company/culture/all-remote/tips/#coffee-chats), and [Contribute](https://www.youtube.com/watch?v=xdtPNXtkBhE) to help folks better understand how we are able to be successful as an all-remote company.\n\nIt's exciting to hear the conversation on all-remote work evolve as people learn more about it. One of the main reasons I joined GitLab was the ability to be part of an [all-remote](/company/culture/all-remote/) company. I believe we can change how the world views all-remote teams as we continue to be successful. With more than 2,000 [contributors](/community/contribute/), more than 600 people on our [team](/company/team/), and many more wanting to join, we are off to a good start.\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-cards=\"hidden\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Over the last 3 months we had over 20,000 applications for the vacancies at \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://t.co/JbmWvk3uDB\">https://t.co/JbmWvk3uDB\u003C/a> It encourages us to push for even more transparency \u003Ca href=\"https://t.co/WQcUPXzcWj\">https://t.co/WQcUPXzcWj\u003C/a> since many people cite that as a reason to apply.\u003C/p>&mdash; Sid Sijbrandij (@sytses) \u003Ca href=\"https://twitter.com/sytses/status/1134122539670691841?ref_src=twsrc%5Etfw\">May 30, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nOur success has [already inspired other companies to follow the all-remote blueprint](/handbook/inspired-by-gitlab/). 
The movement towards all-remote organizations will continue as we grow, generating more awareness and opening up opportunities that were never previously available to people around the world.\n\nHere's a recording of a meeting I attended between our CEO Sid and GitLab board member Sue Bostrom that touched on our all-remote story:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ePZpfeTG63M\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## 2. \"One of our values is...\"\n\nIn nearly every meeting I attended over the two-week rotation, [GitLab’s values](https://handbook.gitlab.com/handbook/values/) were mentioned. Transparency is the most common value highlighted when our CEO meets with members of the wider GitLab community (see above tweet), many of whom are surprised to see Sid using Google to search for our handbook, roadmap, or OKRs – which is possible because they’re published publicly on our website. With his executive team and other leaders in the company, Sid is frequently focused on [results](https://handbook.gitlab.com/handbook/values/#results) – from structuring his meetings with a bias to action (more on this later) to pushing for GitLab to always be more data-driven and analytical in how we execute on everything from [our releases to our vision](/handbook/product/product-processes/#planning-horizons). 
When something is moving slower than expected, Sid will encourage people to break down the work and make small changes that are easier to ship in alignment with our [iteration value](https://handbook.gitlab.com/handbook/values/#iteration).\n\nOur other values came up in conversations about how we recruit for our fast-growing team and the recruitment of a new chief people officer ([diversity](https://handbook.gitlab.com/handbook/values/#diversity-inclusion)), how well our people are performing as managers of one ([efficiency](https://handbook.gitlab.com/handbook/values/#efficiency)), and the importance of dogfooding our own product ([collaboration](https://handbook.gitlab.com/handbook/values/#collaboration)).\n\n## 3. \"Is this already in the handbook?\"\n\nAs I alluded to earlier, at GitLab we value results and that starts with the CEO. Internal meetings with Sid require an agenda. Those agendas typically follow a [specific format](/handbook/leadership/1-1/suggested-agenda-format/), and they are usually filled with merge requests and other actionable items. Meetings with our CEO are not for status updates. They tend towards discussions that lead to action or for taking action (such as reviewing and merging an MR that is linked to in a meeting agenda). When a discussion takes place without a related MR link in the agenda, Sid inevitably asks, \"Is this already in the handbook?\" or something to that effect. This ensures any follow-up actions are assigned to someone so that actionable, visible changes are not delayed.\n\nEven participation in the shadow program is viewed through the lens of results. As a shadow, one of the [tasks](/handbook/ceo/shadow/#tasks) you’re expected to complete is updating GitLab’s handbook, particularly the shadow page. During my rotation, Sid commented multiple times on the number of MRs that I created to update our handbook. Results have the CEO’s attention.\n\n## 4. 
\"Google Docs are the new whiteboard\"\n\nGoogle Docs are the default tool for GitLab agendas and meeting notes. While they are a necessity in the remote work environment, once you begin using them, you quickly notice the efficiency they bring to meetings. The delight that Sid draws from the efficiency of using Google Docs for notes is clear whenever he happily explains how they are superior to whiteboards, which happens frequently in meetings with people new to GitLab's way of working.\n\nAt GitLab, we find Google Docs to be so efficient and helpful, that we’ve even included [why to use them in our handbook](/company/culture/all-remote/tips/index.html#docs-beat-whiteboards). This handbook addition was contributed by my fellow CEO shadow, [Cindy Blake](/company/team/#cblake2000). In her words:\n\n> \"Often we are asked, 'But how do you whiteboard without everyone physically together?' We use Google Docs for collaboration. Every meeting has a Google Doc for the agenda and for documenting discussion, decisions, and actions. Everyone in the meeting adds notes at the same time. We literally even finish one another's sentences sometimes. By brainstorming in text, instead of drawings, we are forced to clearly articulate proposals, designs, or ideas, with less variance in interpretations. A picture may be worth a thousand words, but it is open to as many interpretations are there are people viewing it. In Google Docs, we use indentations to drill deeper into a given topic. This method retains context for comments, discussions, and ideas.\"\n\n## 5. \"Can you put your headphones on?\"\n\nThe emphasis on clear communication is a priority for Sid and leaks into many of his conversations. This ranges from his awareness and respect when communicating with folks for whom English is not their first language to how we name and structure the parts of our organization and to whether or not a meeting attendee is using headphones on a Zoom chat (note: you should). 
All of this – even the preference for headphones – makes sense.\n\nAt a macro level, as an all-remote, open core company with a global community and [team members in 54 countries](/company/team/), GitLab’s community consists of people with varying levels of English fluency. In order to promote a diverse and inclusive culture, it’s important to choose clear language when writing and speaking – from how we name teams and features to the idioms and slang we choose not to use. At a micro level, if you’re meeting with someone who has a poor video or audio connection the issue must be resolved so that everyone can understand each other and get through the agenda.\n\n## Takeaways\n\nWhether you're reading this because you have a meeting with Sid, you're joining the CEO shadow program, or you simply want to add some best practices from a CEO to incorporate in your routine, there are a few key takeaways to distill from these common topics and questions.\n\n* All-remote is gaining momentum\n* Values matter\n* Have a bias towards action\n* Find tools that work for you\n* Clear communication is key\n\nOne other thing you'll hear often when you're with Sid is \"Thank you.\" Despite being a CEO, Sid is generous with his time and praise and never fails to say thank you to folks he spends time with. 
As a parent of two young children myself, I think that might be the most important takeaway of all.\n",[832,811,9],{"slug":2122,"featured":6,"template":680},"five-things-you-hear-from-gitlab-ceo","content:en-us:blog:five-things-you-hear-from-gitlab-ceo.yml","Five Things You Hear From Gitlab Ceo","en-us/blog/five-things-you-hear-from-gitlab-ceo.yml","en-us/blog/five-things-you-hear-from-gitlab-ceo",{"_path":2128,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2129,"content":2135,"config":2141,"_id":2143,"_type":14,"title":2144,"_source":16,"_file":2145,"_stem":2146,"_extension":19},"/en-us/blog/five-ways-resist-service-wrapping-buyer-based-open-core",{"title":2130,"description":2131,"ogTitle":2130,"ogDescription":2131,"noIndex":6,"ogImage":2132,"ogUrl":2133,"ogSiteName":667,"ogType":668,"canonicalUrls":2133,"schema":2134},"5 Ways to resist the threat of service-wrapping with buyer-based open core","Commercial open source businesses are at risk of commoditization by hypercloud providers – here are some ways to avoid the trap.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680537/Blog/Hero%20Images/osls-buyer-based-open-source.jpg","https://about.gitlab.com/blog/five-ways-resist-service-wrapping-buyer-based-open-core","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Ways to resist the threat of service-wrapping with buyer-based open core\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-04-03\",\n      }",{"title":2130,"description":2131,"authors":2136,"heroImage":2132,"date":2138,"body":2139,"category":1517,"tags":2140},[2137],"Vanessa Wegner","2019-04-03","\n\nGitLab makes money as a commercial open source software (COSS) business. As you\nmight imagine, open source is at risk of becoming commoditized, just by its\ninherent characteristic of being completely … open. 
In today’s age of hyperclouds,\nopen source businesses are under threat of [service-wrapping via cloud\nproviders like Amazon](https://aws.amazon.com/blogs/aws/new-open-distro-for-elasticsearch/), Microsoft, and Google.\n\nTo avoid commoditization, [GitLab has tried a number of business models](/blog/monetizing-and-being-open-source/), from\ndonations to consultancy to single-tenant service, but none of them worked.\nFinally, we settled on open core. At this year’s Open Source Leadership Summit,\nour CEO [Sid Sijbrandij](/company/team/#sytses) talked about where GitLab has hedged its bet to avoid becoming obsolete.\nAs Sid describes in the presentation below, there are five key methods for resisting\ncommoditization with buyer-based open core.\n\n## Watch the presentation\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/G6ZupYzr_Zg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n### What is buyer-based open core?\n\nBuyer-based means that GitLab offers [four different tiers of the software](/pricing/), which offer different functionality based\non what each buyer persona needs.\n\n### How do you generate revenue with buyer-based open core?\n\nEach tier focuses on what the buyer wants – and nothing more. It is also priced\naccordingly. Those at a higher level in the organization often have more budget\nauthority – so they can spend budget on what provides value for them.\n\n### How can COSSes avoid commoditization?\n\n1. Insert proprietary functionality in a majority of your use cases.\n1. Offer many proprietary features.\n1. Offer interaction through a user interface, rather than through APIs.\n1. Cater to price-insensitive buyers.\n1. 
Attract users that rarely contribute to open source.\n\nLearn more about these best practices and how GitLab has implemented them by\n[watching Sid’s presentation](https://youtu.be/G6ZupYzr_Zg), or viewing his slides below:\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vRzKYXPPenZlKkbun3AklJP-xgrC4ga-AqBRyVxOAs2tczZ1VNNUGriYy0vF8iBccuT58rDcwateT3P/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nCover image by [Nastuh Abootalebi](https://unsplash.com/@sunday_digital) on\n[Unsplash](https://unsplash.com/photos/eHD8Y1Znfpk)\n{: .note}\n",[267,745,9],{"slug":2142,"featured":6,"template":680},"five-ways-resist-service-wrapping-buyer-based-open-core","content:en-us:blog:five-ways-resist-service-wrapping-buyer-based-open-core.yml","Five Ways Resist Service Wrapping Buyer Based Open Core","en-us/blog/five-ways-resist-service-wrapping-buyer-based-open-core.yml","en-us/blog/five-ways-resist-service-wrapping-buyer-based-open-core",{"_path":2148,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2149,"content":2155,"config":2160,"_id":2162,"_type":14,"title":2163,"_source":16,"_file":2164,"_stem":2165,"_extension":19},"/en-us/blog/friends-dont-let-friends-add-options-to-code",{"title":2150,"description":2151,"ogTitle":2150,"ogDescription":2151,"noIndex":6,"ogImage":2152,"ogUrl":2153,"ogSiteName":667,"ogType":668,"canonicalUrls":2153,"schema":2154},"Friends don't let friends add options to code","Creating optional features burdens users and applications – here's how we avoid adding options.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678953/Blog/Hero%20Images/options.jpg","https://about.gitlab.com/blog/friends-dont-let-friends-add-options-to-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        
\"@type\": \"Article\",\n        \"headline\": \"Friends don't let friends add options to code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-12-10\",\n      }",{"title":2150,"description":2151,"authors":2156,"heroImage":2152,"date":2157,"body":2158,"category":743,"tags":2159},[930],"2018-12-10","\nSometimes, when trying to make it easier to work in an application, our instinct is to add\noptional features that users can enable if their situations require a specific functionality.\nOur intentions may be good, but these actions can actually cause _more_ problems, since we invite users\n to second-guess their choices by adding extra steps into the user experience.\n\n## The disadvantages of a [choose your own adventure](https://en.wikipedia.org/wiki/Choose_Your_Own_Adventure) model\n\nOne of the most celebrated aspects of [open source](/solutions/open-source/)\nis the freedom that allows developers to brighten a user’s day by adding an\noptional feature that may not be for everyone, but allows a small portion of users\nto engage with a project in a specific way. While it may seem like a great idea\nto cater to individual needs, there are several disadvantages to making something\nan option.\n\n### It creates more work for developers\n\nCreating extra options means more work for both frontend and backend teams.\nThese features add additional code, tests, and documentation for each setting,\nand the various states alter the UI. Adding options hurts you in every step of\nthe development process.\n\n### It places a burden on the user to choose\n\nWhen we solve problems by including options, we force a user to think about the\nfunction and consider its purpose and drawbacks, placing a burden on them to\ncontrol how they use an application. A user hesitates and has to make a decision\nabout whether this is something that should be enabled. 
After all, if an option\nsignificantly enhanced the user experience, then wouldn’t it have been automatically\nintegrated?\n\n### It makes future functionality more difficult to implement\n\nThere's also the long-term impact of additional options. Just one extra option can lead to one of two\npaths, which might influence other parts of an application. So, every\ntime we add an option, the number of states of the application doubles. That's\nexponential growth and it adds up quickly, making it harder to diagnose errors. Multiple\noptions can lead to the creation of states of which we’re unaware, so\nit’s harder for the user to understand how an application should behave, because\nthey don't know whether errors are due to an option or not. And, if it is an\noption causing the error, _which_ option is the problem?\n\n## How we avoid adding options: Bask in the glow of iteration\n\nSo, how do you know if a feature should be optional or not? At GitLab, we ship\nthe first [iteration](https://handbook.gitlab.com/handbook/values/#iteration) and keep delivering based on\nuser feedback. Some of the features that we anticipated may never roll out,\nbecause users didn’t request them. Iteration allows us to reduce the scope of\ndevelopment and avoid including features that aren’t popular or useable.\n\nWhenever users need something new, try to create a solution that's acceptable\nfor the most number of people. Rely on your development and operations teams to\nprovide feedback and ask them to relate to the end user. Conducting\n[UX research](/handbook/product/ux/ux-research/#ux-research) with your users\nalso helps identify pain points and needs.\n\nTeams are continually constrained by development capacity, and adding options to\napplications can absorb previous time and effort. 
We suggest shipping your\napplication without an option and waiting to see whether people request it or\nmake a\n[feature proposal](https://gitlab.com/gitlab-org/gitlab-ce/issues?label_name%5B%5D=feature+proposal)\nfor it. In the end, our role is to solve users’ problems, and our goal is to\nidentify the underlying cause of a challenge and fix it in a way that doesn't\nneed an option.\n\n[Cover image](https://unsplash.com/photos/pKeF6Tt3c08) by [Brendan Church](https://unsplash.com/@bdchu614) on Unsplash\n{: .note}\n",[9,700,745,723],{"slug":2161,"featured":6,"template":680},"friends-dont-let-friends-add-options-to-code","content:en-us:blog:friends-dont-let-friends-add-options-to-code.yml","Friends Dont Let Friends Add Options To Code","en-us/blog/friends-dont-let-friends-add-options-to-code.yml","en-us/blog/friends-dont-let-friends-add-options-to-code",{"_path":2167,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2168,"content":2174,"config":2180,"_id":2182,"_type":14,"title":2183,"_source":16,"_file":2184,"_stem":2185,"_extension":19},"/en-us/blog/from-berlin-to-new-zealand",{"title":2169,"description":2170,"ogTitle":2169,"ogDescription":2170,"noIndex":6,"ogImage":2171,"ogUrl":2172,"ogSiteName":667,"ogType":668,"canonicalUrls":2172,"schema":2173},"Visiting Family During COVID-19 (Germany to New Zealand)","My experience working for Gitlab traveling from Berlin to New Zealand on short notice","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672592/Blog/Hero%20Images/berlin-to-new-zealand-1.jpg","https://about.gitlab.com/blog/from-berlin-to-new-zealand","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Visiting Family During COVID-19 (Germany to New Zealand)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marc Shaw\"}],\n        \"datePublished\": \"2021-04-27\",\n      
}",{"title":2169,"description":2170,"authors":2175,"heroImage":2171,"date":2177,"body":2178,"category":698,"tags":2179},[2176],"Marc Shaw","2021-04-27","The story started in January 2020, around the time chatter of COVID-19 started. I was concerned, but still relatively confident I would be able to travel home and see my grandparents later that year. The initial plan was to fly home to New Zealand in November 2020, and fly back to Berlin in March 2021. Little did I know that countries would nearly all but shut their borders completely over the next few months, restricting travel to only residents and citizens. After the first lockdown, and as summer approached, COVID-19 improved drastically in Berlin with Germany being one of the few countries that were handling the virus with poise and targeted/reasonable restrictions. Back in New Zealand, there was a very strict lockdown that lasted a couple of months, aiming to eradicate the virus from the country.\n\nFast forward to October 2020, Germany was getting hit with the start of their second wave and by November we had gone back into a lockdown. New Zealand on the other hand had introduced a strict quarantine system, one which required you to book months in advance, with openings often hard to come by. Having not seen my grandparents for two and a half years, I was anxious to see them again. This feeling was compounded due to health concerns unrelated to COVID-19, which caused two of them to be admitted to a hospital in late 2020.\n\nA year after the start of the story in January 2021, we had just entered our third consecutive month of being locked down in Berlin, Germany. I mentioned to my manager that I am thinking about trying to get a quarantine slot to make the 30+ hour flight from Berlin to New Zealand, her instant reaction was of support and asked if there was anything she could help with. I mentioned that nothing is set, but I will keep her updated on booking a quarantine slot. 
Luckily I managed to snap up a slot (after a week of trying) for the 2nd of February. I then booked my flights and messaged my manager. Since the time zones difference between Germany and New Zealand was around 12 hours, she sorted out meetings and suggested I take a few days off after landing to get over my jet lag even though it was in the same release. It was the genuine care shown and ease at making changes on short notice that I wholly appreciated.\n\nThroughout the whole experience, it has reinforced to me that GitLab practices the values that it preaches, and for me, this was shown through [family and friends being put first, work is second.](https://handbook.gitlab.com/handbook/values/#family-and-friends-first-work-second) The few months that I have been in New Zealand have made life for me exponentially better (given the circumstances). Living in a country without COVID-19 has allowed me to visit all my family, friends, and colleagues around the country, I have attended street festivals with over 100,000 people, indoor concerts, surfed, got my tattoo, gone hiking. This would not have been possible on such short notice for a lot of companies, potentially at all. 
Every day I reflect on where I am currently in terms of job, location, and everyone around me, and just spend a few minutes appreciating just how lucky I am.\n\n| Ngarunui Beach, Raglan | Cubadupa, Wellington | |:-------------------------:|:-------------------------:| | ![Raglan](https://about.gitlab.com/images/blogimages/berlin-to-new-zealand-2.jpg) | ![Cubadupa](https://about.gitlab.com/images/blogimages/berlin-to-new-zealand-3.jpg) |\n| Blue Spring, Putāruru | Kāpiti Coast | |:-------------------------:|:-------------------------:| | ![Blue Springs](https://about.gitlab.com/images/blogimages/berlin-to-new-zealand-4.jpg) | ![Raglan](https://about.gitlab.com/images/blogimages/berlin-to-new-zealand-5.jpg) |",[9,832],{"slug":2181,"featured":6,"template":680},"from-berlin-to-new-zealand","content:en-us:blog:from-berlin-to-new-zealand.yml","From Berlin To New Zealand","en-us/blog/from-berlin-to-new-zealand.yml","en-us/blog/from-berlin-to-new-zealand",{"_path":2187,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2188,"content":2194,"config":2199,"_id":2201,"_type":14,"title":2202,"_source":16,"_file":2203,"_stem":2204,"_extension":19},"/en-us/blog/from-dev-to-devops",{"title":2189,"description":2190,"ogTitle":2189,"ogDescription":2190,"noIndex":6,"ogImage":2191,"ogUrl":2192,"ogSiteName":667,"ogType":668,"canonicalUrls":2192,"schema":2193},"Complete DevOps is DevOps reimagined. Here's what that looks like","It's all systems go on Complete DevOps! We've re-imagined the scope of DevOps to bring development and operations work into a single application.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670652/Blog/Hero%20Images/dev-to-devops-cover.png","https://about.gitlab.com/blog/from-dev-to-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Complete DevOps is DevOps reimagined. 
Here's what that looks like\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2017-10-11\",\n      }",{"title":2189,"description":2190,"authors":2195,"heroImage":2191,"date":2196,"body":2197,"category":299,"tags":2198},[762],"2017-10-11","\n\nUpdate: for the most recent status of complete DevOps please see our [Product Vision](/direction/) page.\n\nEarlier this week [we announced our #CompleteDevOps vision](/blog/gitlab-raises-20-million-to-complete-devops/). Let's take a closer look at what that means, and how it's different from traditional DevOps.\n\n\u003C!-- more -->\n\n## Traditional vs. Complete DevOps\n\nIn the early days of software development the process of taking an idea to production was slow, insecure and vulnerable to errors. DevOps emerged as a way to foster collaboration and create faster iteration cycles with greater quality and security. As it sits today, DevOps is a set of practices at the intersection of development and operations. It was a huge step forward.\n\n\u003Cimg src=\"/images/blogimages/dev-to-devops-intersection.png\" alt=\"Intersection of Dev and Ops\" style=\"width: 500px;\"/>{: .shadow}\n\nBut it didn't go far enough.\n\nEven with the [adoption of DevOps](/topics/devops/), serious challenges continue to exist. Developers and operators used to be separate groups with separate tools. The people are now closer together but their tools are still apart. This hinders dev and ops teams from working together. 
Trying to glue their tools together with traditional DevOps applications doesn't solve the fundamental problem of having separate applications.\n\n\u003Cimg src=\"/images/blogimages/dev-to-devops-tools.jpg\" alt=\"Distinct tools of developers and operators\" style=\"width: 800px;\"/>{: .shadow}\n\n## Why Complete DevOps?\n\n\u003Cimg src=\"/images/blogimages/dev-to-devops-union.png\" alt=\"Union of Dev and Ops\" style=\"width: 500px;\"/>{: .shadow}\n\nComplete DevOps reimagines the scope of tooling to include both developers and operations teams in one unified solution. This dramatically reduces friction, increases collaboration, and drives a competitive advantage. Doing away with context switching and having all the necessary information in one place closes the loop and enables a better understanding of each team's needs.\n\n\u003Cimg src=\"/images/blogimages/dev-to-devops-advantages.jpg\" alt=\"The advantages of Complete DevOps\" style=\"width: 800px;\"/>{: .shadow}\n\n To make our vision a reality, we're working on a number of new features and improving on existing ones. 
You can take an in-depth at some of these in our Head of Product [Mark Pundsack](/company/team/#MarkPundsack)'s [outline here](/blog/devops-strategy/), or watch the full presentation about our Complete DevOps vision below.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/5dhjw-TT964?start=1437\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\nYou can also browse the slides at your leisure:\n\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vRVKUjMMa7M7lPV04_TMgfmd2Fj_kEQYW9-RvKAtKf799_Dwbfvos8diqinI-Uhm1uTwPYCdAPPzun1/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\nShare your thoughts, comments, and questions about #CompleteDevOps with us on [Twitter](https://twitter.com/gitlab)!\n",[9,1440,675],{"slug":2200,"featured":6,"template":680},"from-dev-to-devops","content:en-us:blog:from-dev-to-devops.yml","From Dev To Devops","en-us/blog/from-dev-to-devops.yml","en-us/blog/from-dev-to-devops",{"_path":2206,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2207,"content":2213,"config":2218,"_id":2220,"_type":14,"title":2221,"_source":16,"_file":2222,"_stem":2223,"_extension":19},"/en-us/blog/funny-gitlab-remote-meetings",{"title":2208,"description":2209,"ogTitle":2208,"ogDescription":2209,"noIndex":6,"ogImage":2210,"ogUrl":2211,"ogSiteName":667,"ogType":668,"canonicalUrls":2211,"schema":2212},"Wild and crazy things that only happen to all-remote teams","Working remotely may make for a calmer commute but plenty of adventure awaits.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680938/Blog/Hero%20Images/joshua-tree-leap.jpg","https://about.gitlab.com/blog/funny-gitlab-remote-meetings","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Wild and crazy things that only happen 
to all-remote teams\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Murph\"}],\n        \"datePublished\": \"2019-12-16\",\n      }",{"title":2208,"description":2209,"authors":2214,"heroImage":2210,"date":2215,"body":2216,"category":808,"tags":2217},[890],"2019-12-16","\n\nGitLab has more than [1,000 team members](/company/team/) across 65 (and counting!) countries. Every employee works remotely, from wherever they're most comfortable, and we have no company offices. While that allows us all to avoid the headaches of [commuting](/company/culture/all-remote/#for-the-world), it doesn't mean that our days are boring. Far from it, actually.\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">&quot;It was just this mad scramble to turn off the camera.&quot; More people are working remotely, leading to videoconference call faux pas. \u003Ca href=\"https://t.co/NbdEeWxbGv\">https://t.co/NbdEeWxbGv\u003C/a>\u003C/p>&mdash; The Wall Street Journal (@WSJ) \u003Ca href=\"https://twitter.com/WSJ/status/1159505825016295424?ref_src=twsrc%5Etfw\">August 8, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n[Tim Zallmann](/company/team/#timzallmann), a director of engineering at GitLab, was recently featured in a [Wall Street Journal report](https://www.wsj.com/articles/why-are-you-shirtless-when-remote-video-calls-go-wrong-11565280525?mod=e2tw) highlighting comedic happenings for those who work remotely. We polled the entire company to see if they had any stories to share, and the answers came rolling in.\n\n### Surprise guests\n\n![Monkeys in meetings](https://about.gitlab.com/images/blogimages/monkey-airbnb.jpg){: .shadow.medium.center}\nMonkey in the meeting.\n{: .note.text-center}\n\n> I was work-traveling last year for 6 weeks with my wife and kid through South Africa. One day, I was in a video call at a new Airbnb. 
I had my headphones in when a monkey tried to get through the window behind me. In the middle of the call, I was casually informed that there was a monkey behind me... which resulted in me screaming quite loudly, realizing the monkey was already well on its way inside.\n– [Tim Zallmann](/company/team/#timzallmann), director of engineering, Dev\n\n> When food delivery arrives in the middle of a meeting, but you didn't order enough for everyone.\n– [Patrick Harlan](/company/team/#pharlan), technical account manager\n\n> It's great when kids decide to jump into a call. Lots of big eyes and cute little hand waves. They also tend to whisper frantically into their parents' ears. Toddlers are the best.\n– [Christie Lenneville](/company/team/#clenneville), director of UX\n\n> Our dogs talk to each other. If I am on computer audio and my dog hears a GitLabber dog barking, he joins in. Some people – who shall remain nameless – like to tease dogs with pretend barking to see if they can get them to bark. We have also had pets join us for coffee chats and visit with each other.\n– [Kimberly Lock](/company/team/#kimlock), customer reference manager\n\n### Serendipitous run-ins\n\n![GitLab team members meet up for a day at the zoo](https://about.gitlab.com/images/blogimages/gitlab-san-diego-zoo.jpg){: .shadow.medium.center}\nGitLab team members meet up for a day at the zoo.\n{: .note.text-center}\n\n> I love traveling somewhere and instantly finding friends. I recently took a road trip with my family down the coast of California and met a GitLab team member who joined for a walk to the San Diego Zoo. 
I'd never met him before, but felt like an instant friend with so much to talk about.\n– [Priyanka Sharma](/company/team/#pritianka), director of technical evangelism\n\n> Joining a video call and finding out the person you are meeting with lives in your city.\n– [Lee Matos](/company/team/#leematos), Support engineering manager, Americas East\n\n\n\n### Moments fantastic and funny\n\n![Virtual happy hour](https://about.gitlab.com/images/blogimages/team-group-call-gitlab.jpg){: .shadow.medium.center}\nVirtual happy hour.\n{: .note.text-center}\n\n> We do virtual Friday happy hours with the team. We get on a big group call and everyone brings their beverage of choice (water, tea, whatever) and just chats for a few minutes about what they're doing for the weekend, etc. Fun times where you can bond with you co-workers. Even our [CEO Sid](/company/team/#sytses) shows up to many of them!\n– [Tina Sturgis](/company/team/#t_sturgis), senior manager, partner and channel Marketing\n\n> I have an LED color-changing light that I use at the foot of the basement stairs so my kids know if they can come in or not. Red, yellow, and green lights let them know if I'm on a call or taking a break (or somewhere in between).\n– [Brendan O'Leary](/company/team/#olearycrew), senior solutions manager\n\n> Green screen usage is a must! Cape Town, Star Trek ships, or a beach in Hawaii – the backdrop options on video calls are endless.\n– [Priyanka Sharma](/company/team/#pritianka), director of technical evangelism\n\n> When someone on a video call says \"Alexa\" and everyone's Alexa wakes up.\n– [Brendan O'Leary](/company/team/#olearycrew), senior solutions manager\n\n### GitLab's approach to meetings\n\nWe [approach meetings differently](/company/culture/all-remote/meetings/) at GitLab. While one's appearance, surroundings, and background can be the source of great stress and anxiety when preparing for a video call, GitLab team members are encouraged to bring their whole selves to work. 
That means we celebrate unique surroundings and welcome appearances from family and pets.\n\nLearn more about our [all-remote culture](/company/culture/all-remote/). If you're interested in being featured in the next round of remote outtakes, browse our [vacancies](/jobs/) and apply!\n\nCover image by Kevin Oliver\n{: .note}\n",[810,9,832],{"slug":2219,"featured":6,"template":680},"funny-gitlab-remote-meetings","content:en-us:blog:funny-gitlab-remote-meetings.yml","Funny Gitlab Remote Meetings","en-us/blog/funny-gitlab-remote-meetings.yml","en-us/blog/funny-gitlab-remote-meetings",{"_path":2225,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2226,"content":2231,"config":2237,"_id":2239,"_type":14,"title":2240,"_source":16,"_file":2241,"_stem":2242,"_extension":19},"/en-us/blog/fuzzit-acquisition-journey",{"title":2227,"description":2228,"ogTitle":2227,"ogDescription":2228,"noIndex":6,"ogImage":690,"ogUrl":2229,"ogSiteName":667,"ogType":668,"canonicalUrls":2229,"schema":2230},"Fuzzit - GitLab journey","From a bootstrap startup to integral part of GitLab.","https://about.gitlab.com/blog/fuzzit-acquisition-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fuzzit - GitLab journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yevgeny Pats\"}],\n        \"datePublished\": \"2020-10-22\",\n      }",{"title":2227,"description":2228,"authors":2232,"heroImage":690,"date":2234,"body":2235,"category":698,"tags":2236},[2233],"Yevgeny Pats","2020-10-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n## Fuzzit Story\n\nFuzzit started in early 2019 by myself as a spin-off project from my consulting company.\nThe consulting revenue gave me the opportunity to dedicate time and explore the fuzzing-as-a-service idea a bit more without taking VC money too early and becoming “locked-in”.\n\nAfter about 6 months, Fuzzit started gaining traction and becoming a leader in the 
open-source community.\nBeing the first commercial product to offer languages such as: Go, Rust and more, while at the time OSS-Fuzz only supported C/C++ and wasn’t available for all oss projects. \n\nAfter about 8 months once the product matured thanks to input from the open-source users,\nwe went exploring the enterprise market more deeply. We developed that in 3 main directions: \n\n- Enterprise clients interviews and PoCs,\n- Partnerships with various CI providers to expand the reach.\n- Enterprise focused VCs\n\nIn that process we were lucky to meet with GitLab,\nwhere after a few calls it became apparent this could be a great fit for both sides to pursue  an acquisition (I’ll expand on that later on here).\n\nAt that point in time, we had to decide if we were either moving forward with an acquisition or going to raise funding to try and build a large business.\nIn our process of exploring the fuzzing enterprise market,\nwe understood that if we want to build a big DevSecOps company we would need to expand the offering far beyond continuous coverage-fuzzing.\nThis is of-course possible but will create even more fragmentation in the already fragmented market,\nand will require a substantial amount of financial investment.\nThe opportunity to join a unique place like GitLab for me personally and the amazing technological fit \nfor Fuzzit to be supported natively in a complete [DevSecOps platform](/solutions/security-compliance/), made the decision easy for me. 
\n\n## Acquisition process\n\nBeing part of a few acquisitions (some successful and some not)\nI can say first hand that the acquisition process is always a complex one,\nwhere only few acquisitions close in the end and many fall in various stages of the process.\nThe acquisition process was very transparent and efficient, as documented in the [handbook](https://about.gitlab.com/handbook/acquisitions/).\n\nCompletely by chance the head of corp dev, Eliran Mesika, is an Israeli which made things very easy for me personally as I could speak and negotiate in my mother tongue.\nGitLab grew in the last two years to over 1200 people, doubling the team, so understanding the structure and driving the process are not easy feats.\nThe process was very transparent even with some unexpected delays/bumps on the way.\n\nDuring the acquisition process I had the chance to meet quite a few people from the Secure team\nwhere we discussed the technology, how the integration will look like and make sure it’s a good fit for everyone both in terms of technology and culture.\nAfter term-sheet was signed, it was mainly legal-work and once that was complete I joined GitLab!\n\n## Joining GitLab\n\nMy vision at Fuzzit was to advance continuous coverage-guided fuzzing adoption to make software more secure.\nI’m only 5 months in but I feel that this vision fits perfectly at GitLab with its shift-left strategy and single DevSecOps application.\nI believe native support for continuous coverage-guided fuzzing in GitLab will lower the barrier to entry for developers, increase adoption and will make software more secure.\nI still have a lot of work and learning to do at Gitlab to achieve the above but so far we have made great progress.\n. 
It has been an awesome experience for me and hopefully for everyone else here who was involved!\n\nYou can checkout the current state and documentation of coverage-guided fuzzing in GitLab [here](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing)\n\n## Future\n\nStay tuned for future fuzzing features and blogs!\n",[9,720,873],{"slug":2238,"featured":6,"template":680},"fuzzit-acquisition-journey","content:en-us:blog:fuzzit-acquisition-journey.yml","Fuzzit Acquisition Journey","en-us/blog/fuzzit-acquisition-journey.yml","en-us/blog/fuzzit-acquisition-journey",{"_path":2244,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2245,"content":2251,"config":2257,"_id":2259,"_type":14,"title":2260,"_source":16,"_file":2261,"_stem":2262,"_extension":19},"/en-us/blog/gemnasium-our-gitlab-journey",{"title":2246,"description":2247,"ogTitle":2246,"ogDescription":2247,"noIndex":6,"ogImage":2248,"ogUrl":2249,"ogSiteName":667,"ogType":668,"canonicalUrls":2249,"schema":2250},"Gemnasium: Our GitLab journey","We joined GitLab as a small startup and quickly became an integral part of the company. We want to share our success story with the startup community.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679751/Blog/Hero%20Images/gemnasium-gitlab-cover.png","https://about.gitlab.com/blog/gemnasium-our-gitlab-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Gemnasium: Our GitLab journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Philippe Lafoucrière\"}],\n        \"datePublished\": \"2019-04-30\",\n      }",{"title":2246,"description":2247,"authors":2252,"heroImage":2248,"date":2254,"body":2255,"category":299,"tags":2256},[2253],"Philippe Lafoucrière","2019-04-30","\n\nGoing through acquisition is never easy, and often fails eventually. 
Two common scenarios can occur:\nEither the two companies are of similar size, and in this “merger” configuration employees are\nscared of duplicate jobs; or the buyer is slightly bigger than the seller, and there's a risk of losing\nthe culture and cohesion of the team. Ours was the latter: We were afraid of being absorbed and\ndigested completely, and eventually not working on the same subjects anymore, or not together.\nI’ve spent years building a trusting relationship with my team, and I was worried about\ntheir future in this new adventure.\n\nThis story is different. We’ve been at GitLab for over a year now, and it's all been for the best.\nWe still have the pleasure of working together, even though the team has doubled in size since.\nI wanted to share some reflections on Gemnasium's experience of being acquired by and integrated into GitLab:\n\n## Negotiating the acquisition\n\nWe had our share of ups and downs during the development of Gemnasium. I learned to be very\ncautious about my business relations. This due diligence is always a critical step, especially for\nthe buyer, to avoid any surprises and ensure the quality of the purchased product.\nDuring that step, we couldn’t answer all the requests from GitLab, since sharing algorithms and\nsource code was putting us at risk. But we explained why and managed to provide something\nclose enough to fulfil the requirement. We had open and healthy discussions at that point with\nGitLab, and it helped to create the trust we were looking for.\n\n## Joining GitLab\n\nWhen we joined the company, I was amazed to see everyone contributing to the [handbook](/handbook/).\nLiterally everyone, including PeopleOps, Sales, and Marketing. Committing changes with Git is the\nDNA of the company, and really makes a difference. There’s no one left behind, struggling with\nthe inherent technical difficulties of contributing to a shared repository.\nThen I discovered what fuels GitLab to make it so special: Slack. 
A lot of companies already use\nSlack, often for the best. But with GitLab being an all-remote company, Slack is a main communication\nchannel for everyone, including with other teams. At the time, GitLab was already present in\n40 countries (vs more than 50 as of today), which means a lot of time zones covered. There’s\nalways someone available to help and answer questions.\nEven administrative problems are taken care of by the People Ops team in a few minutes. Not\ndays, not weeks – minutes. It allows all employees to focus on what really matters: Delivering and\nmaking the product better. No need to follow up anymore, nothing to complain about; the burden\nis just gone, and everybody moves on. When you make the life of your employees easier, they are\nhappier and more productive. As simple as that.\n\n## Concluding the Gemnasium story\n\nThe acquisition, like everything else at GitLab, went extremely quickly – so quickly that we didn’t have the\nopportunity to bond one last time together as a team. That was a concern to me, also because\nthe onboarding was overwhelming. So many questions, so many processes and new concepts to\ndigest. Our Product Manager Fabio Busatto was really helpful and did everything he could\nto get up to speed as soon as possible. It felt obvious that we would benefit from having a “retreat”\nin a common place, to close the Gemnasium story, and put the new GitLab one on track. “We don’t\ndo that at GitLab, we’re a remote company,” was the first answer I got. I didn’t have to insist\ntoo much to convince our CEO [Sid](/company/team/#sytses), and I promised to keep everything cheap and neat. 
We already had two\nteam members in Quebec City, so it made sense to organize something here, to save on travel.\n“[Everything starts with an issue](/handbook/communication/#everything-starts-with-an-issue)” at GitLab, so I created\n[one to make this case](https://gitlab.com/gitlab-com/people-ops/General/issues/99).\nA few days later, the idea and budget were approved without any trouble. We could spend a\nwhole week altogether, and it was a great experience for all of us. The feedback from the team\nwas very positive, and it boosted morale as well.\n\n## Becoming the Secure Team\n\nAs an official part of GitLab, the Gemnasium team became the Security Products\nTeam, now called the [Secure Team](/handbook/engineering/development/sec/secure/). Our scope is much broader than just dependency scanning, and\nwe were expected to deploy SAST, DAST, and Container Scanning solutions. It took us less than a month\nto deliver an [MVC](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) of dependency scanning, based on Gemnasium. We were already working\nremotely, using GitLab, so the pipeline and other parts of the equation were familiar to us. Before the\nnext milestone following our arrival, Gemnasium was running on GitLab infrastructure.\n[Dmitriy](/company/team/#dzaporozhets) (CTO) and Sid (CEO) were really present, taking the pulse of the team, and helping us\nto remove any roadblocks. They didn’t try to force us to do this integration their way. It was really\na collaboration and every meeting began with them asking, “How can we help you?”\n\n## Transitioning from manager to individual contributor\n\nAfter a few months, it became obvious that the team was performing well and heading in the right direction.\nWe had results, customers, and a huge roadmap ahead of us. It was time to start hiring new engineers.\nBack then, I was meeting with a lot of customers, gathering feedback and ideas to help our product\nmanager, and helping with pre-sales. 
Hiring new engineers can be very time consuming, and\nwith our expectations for the Secure Team, that means a full-time job for a while. Instead of\nforcing me to stop what I was doing and start right away with the recruiting, my manager\ndecided to leverage my skills. I was recently promoted to a Distinguished Engineer position,\nwhich also means switching from the [management branch to the Individual Contribution path](/handbook/engineering/career-development/#individual-contribution-vs-management).\n\nThis is a big shift for me and the team, but in the end, it results in more space and latitude to\nwork on various subjects: Developing our Security Products is much more than just a roadmap\nand implementation. We need to understand the competition, discuss strategic partnerships,\nidentify risks and opportunities, and many other things left aside during all our regular processes.\nBeing my own boss for the last 10 years taught me to be efficient and put the team in the best position for success.\n\n> The bureaucracy that's often associated with large organizations\nis very limited, even after growing to more than 500 people\n\nMy manager, [Dalia Havens](/company/team/#dhavens), has been nothing but supportive\nin this area since the beginning, and a great [servant leader](https://en.wikipedia.org/wiki/Servant_leadership).\nGitLab has been successful so far because the bureaucracy that's often associated with large organizations\nis very limited, even after growing to more than 500 people. As soon as a roadblock is identified,\nwe can discuss collaborate to fix the problem, sometimes right away.\nReducing the number of steps necessary to actually achieve or deliver something is one of the keys\nto happiness for a team used to iterating daily.\n\nI think this is the main reason for the success of this acquisition. 
At no time did GitLab try to put us in a box.\nAs soon as the results are there, we’re free to experiment, to innovate, and more importantly, to build our own future.\n\n## Experimenting and innovating\n\nOne good example of this freedom to explore is the [auto-remediation feature](/direction/secure/#auto-remediation).\nIn 2014, we shipped our second iteration of the Auto-Update in Gemnasium. While the\nalgorithm behind the update sets had been improved, we were aware that the setup was far from\nsimple, which was against the philosophy behind Gemnasium: In order to work, our algorithm\nhad to run the pipeline, maybe multiple, consecutive times (with different update sets).\nThis was clearly hard to achieve for our users, and for our developers (we didn’t know anything about the test suite).\nBeing part of GitLab would solve that issue, as we would eventually be able to pilot the pipeline\nfor that. Even better, we would be able to hide the runs from the users.\n\nAfter a few customer meetings, it was obvious that this feature was a competitive advantage, and we decided to push\nit more. The whole team was excited to contribute to what would be the first MVC, as our product\nmanager helped to refine the feature, gluing all the pieces together. This step was essential:\nIt allowed everyone to contribute and influence the roadmap. Even as the company gets bigger\nevery day, we still feel empowered and a part of the decision-making process.\n\nThese past 12 months have been extremely exciting and rewarding. While we’re now fully integrated\ninto GitLab, we still feel the fresh air of freedom we had during the Gemnasium years. 
Even\nbetter, we can focus on what we love, and stop worrying about the short-term future.\n\nIf you're interested in being acquired by GitLab, we're actively looking for startups to join us.\nPlease visit our [acquisitions handbook](/handbook/acquisitions/) to find out more and to see if you\nare the right fit.\n",[9,720,873],{"slug":2258,"featured":6,"template":680},"gemnasium-our-gitlab-journey","content:en-us:blog:gemnasium-our-gitlab-journey.yml","Gemnasium Our Gitlab Journey","en-us/blog/gemnasium-our-gitlab-journey.yml","en-us/blog/gemnasium-our-gitlab-journey",{"_path":2264,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2265,"content":2271,"config":2278,"_id":2280,"_type":14,"title":2281,"_source":16,"_file":2282,"_stem":2283,"_extension":19},"/en-us/blog/geo-is-available-on-staging-for-gitlab-com",{"title":2266,"description":2267,"ogTitle":2266,"ogDescription":2267,"noIndex":6,"ogImage":2268,"ogUrl":2269,"ogSiteName":667,"ogType":668,"canonicalUrls":2269,"schema":2270},"Why we enabled Geo on the staging environment for GitLab.com","Geo is GitLab's solution for distributed teams and now we can validate and test it at scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669656/Blog/Hero%20Images/donald-giannatti-4qk3nQI3WHY-unsplash-small.jpg","https://about.gitlab.com/blog/geo-is-available-on-staging-for-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we enabled Geo on the staging environment for GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabian Zimmer\"},{\"@type\":\"Person\",\"name\":\"Douglas Alexandre\"}],\n        \"datePublished\": \"2020-04-16\",\n      }",{"title":2266,"description":2267,"authors":2272,"heroImage":2268,"date":2275,"body":2276,"category":743,"tags":2277},[2273,2274],"Fabian Zimmer","Douglas Alexandre","2020-04-16","\nWe're testing Geo at scale on GitLab.com – our largest installation 
of GitLab – because we believe the best way to guarantee that Geo works as expected is to [use it ourselves](/handbook/product/product-processes/#dogfood-everything).\n\nGeo is GitLab's [solution for distributed teams](/solutions/geo/). We want teams all over the world to have a great user experience - independent of how far away users are from their primary GitLab installation. To accomplish this goal, read-only Geo nodes can be created across the world in close geographical proximity to your teams. These Geo nodes replicate important data, such as projects or LFS files, from the primary GitLab instance and thereby make the data available to users. Geo can also be used as part of a disaster recovery strategy because it adds data redundancy. Geo nodes follow the primary installation closely and allow customers to failover to this node in case the primary node becomes unavailable.\n\nMany of GitLab's customers use Geo on self-managed installations that serve hundreds to thousands of users. Geo is a critical component of GitLab installations and our customers expect Geo to work at any scale. We are testing Geo at scale on our GitLab.com installation because if it works for us, chances are it will work for our worldwide group of users too.\n\nIn this blog post, we'll explain why and how we chose to enable GitLab Geo on our pre-production environment (from now on referred to as \"staging\"), the challenges we encountered, some of the immediate benefits to our customers, and what will be next.\n\n## Why do we need to use Geo at GitLab?\nIn order to build the best product possible, we believe it is imperative to [use GitLab ourselves](/handbook/product/product-processes/#dogfood-everything). Many of our Geo customers have thousands of users actively using GitLab and a major challenge for the team was to test and validate new Geo functionality at scale. 
Enabling Geo on the GitLab.com staging environment makes this task a lot easier.\n\nWe also used Geo to [migrate GitLab.com from Microsoft Azure to Google Cloud in 2018](/blog/moving-to-gcp/), which allowed us to improve the product by identifying bottlenecks. In the last two years, GitLab has grown dramatically and in order to push Geo forward, we need to enable it (again).\n\n### Test Geo at scale\nWhen the team decides to add new functionalities to Geo, for example [package repository replication](https://gitlab.com/groups/gitlab-org/-/epics/2346), we had to ensure that the feature's performance is as expected. Having Geo available on staging allows us to deploy these changes behind a feature flag first and evaluate the performance before shipping the feature to customers. This is especially relevant to some of Geo's PostgreSQL database queries. On a small test deployment, things may look fine, but at scale these queries can time out, resulting in replication issues.\n\nWe also deploy code to our staging environment twice a week, which means that any regressions surface before a new packaged release.\n\n### Prove that Geo can be deployed as part of our production infrastructure\nA large amount of automation is required to run GitLab.com with millions of users, and our SRE team is constantly improving how we run GitLab.com. The first step bringing Geo into our production environment is to deploy Geo as a part of our staging environment. 
Without the right monitoring, runbooks, and processes in place, it would not be possible to move Geo into production where it could be used to enable geo-replication and/or as part of our disaster recovery strategy.\n\n## Setting up Geo on staging\n\nSetting up Geo on staging had some unique challenges, you can get a detailed overview in our [Geo on staging documentation](/handbook/engineering/development/enablement/systems/geo/staging.html).\n\nIn order to deploy Geo, we opted for a minimally viable approach that is sufficient for a first iteration. Geo is currently deployed as a single all-in-one box, not yet as a [Geo high-availability configuration](https://docs.gitlab.com/ee/administration/geo/replication/multiple_servers.html). Geo deploys happen automatically via Chef, similar to any other part of the infrastructure.\n\n![Geo staging Diagram](https://about.gitlab.com/images/blogimages/geo-on-staging/geo_staging_diagram.png){: .shadow.medium.center}\n\nWe currently replicate only a subset of data using [Geo's selective synchronization feature](https://docs.gitlab.com/ee/administration/geo/replication/configuration.html#selective-synchronization), which also allows us to dogfood this feature. Selective synchronization uses a number of complex database queries and this helps us validate those at scale. We chose to replicate the `gitlab-org` group, which contains mostly of GitLab's projects (including [GitLab](https://gitlab.com/gitlab-org/gitlab) itself).\n\nWe also needed to configure Geo to use the same logical [Gitaly shards](https://docs.gitlab.com/ee/administration/repository_storage_paths.html) on the secondary compared to the primary node. 
We'll [improve our Geo documentation](https://gitlab.com/gitlab-org/gitlab/-/issues/213840) to ensure it is clear when this is required.\n\nA logical Gitaly shard is an entry in the GitLab configuration file that points to a path on the file system and a Gitaly address:\n\n```\n\"git_data_dirs\": {\n  \"default\": {\n    \"path\": \"/var/opt/gitlab/git-data-file01\",\n    \"gitaly_address\": \"unix:/var/opt/gitlab/gitaly/gitaly.socket\"\n  }\n}\n```\n\nIn the example above, we have only one logical shard identified by the key `default`, but we could have as many as needed.\nEvery project on GitLab is associated with a logical Gitaly shard, which means that we know where all relevant data (repositories, uploads, etc.) is stored. A project `example` that is associated with the logical Gitaly shard `default`, would therefore be stored at `/var/opt/gitlab/git-data-file01` and the Gitaly server would be available at `/var/opt/gitlab/git-data-file01`.\n\nThis information is stored in the PostgreSQL database and in order for Geo to replicate projects successfully we needed to create the same Gitaly shard layout. On the Geo secondary node, we are using only one physical shard to store the data for all projects. To allow it to replicate any project from the primary node, we had to point all the logical Gitaly shards to the same physical shard on the secondary node.\n\nGeo on staging is configured to use [cascading streaming replication](https://www.postgresql.org/docs/current/warm-standby.html#CASCADING-REPLICATION), which allows one standby node in the staging [Patroni cluster](https://github.com/zalando/patroni) to act as relay and stream write-ahead logs (WAL) to the Geo secondary. This setup also has the advantage that Geo can't put an additional load onto the primary database node and we are also not using physical replication slots to further reduce the load. 
[Patroni will likely be supported in Omnibus packages](https://gitlab.com/groups/gitlab-org/-/epics/2588) and we will review these settings to allow our customers to benefit from this setup.\n\nPostgreSQL will automatically fall back on its `restore_command` to pull archived WAL segments using [wal-e](https://github.com/wal-e/wal-e), if it cannot retrieve the segment by streaming replication. This can happen after a failover, or if the replication target has deleted the relevant segment if Geo is lagging behind it.\n\nIn the future, we will use this to experiment with [high-availability configurations of PostgreSQL on a secondary Geo node](https://gitlab.com/groups/gitlab-org/-/epics/2536).\n\n## What we learned and how we can improve\n\nWe opened [23 issues before successfully rolling out Geo on our staging environment](https://gitlab.com/groups/gitlab-org/-/epics/1908) - this is too many. We know that installing and configuring Geo in complex environments is time-consuming and error-prone, and is an area where we can improve. The current process for a self-managed installation requires [more than 70 individual steps](https://gitlab.com/gitlab-org/gitlab-design/issues/731) - this is too much. [Geo should be simple to install](https://gitlab.com/groups/gitlab-org/-/epics/1465) and we aim to reduce the number of steps to below 10. Using Geo ourselves really underscored the importance of improvements in this area.\n\n### Some Geo PostgreSQL queries don't perform well\n\nGeo uses PostgreSQL Foreign Data Wrappers (FDW) to perform some cross-database queries between the secondary replica and the tracking database. FDW queries are quite elegant but have lead to some issues in the past. Specifically, staging is still running PostgreSQL 9.6, and Geo benefits from some FDW improvements available only in PostgreSQL 10 and later, such as join push-down and aggregate push-down.\n\nWhile enabling Geo on staging, some FDW queries timed out during the backfill phase. 
Until staging is being upgraded to a newer version of PostgreSQL, increasing the statement timeout to 20 minutes on the Geo secondary node was sufficient to allow us to proceed with the backfill.\n\nAs a direct consequence of enabling GitLab on staging, we are working to [improve Geo scalability by simplifying backfill operations](https://gitlab.com/groups/gitlab-org/-/epics/2851), eliminating these cross-database queries, and removing the FDW requirement. We also plan to [upgrade to PostgreSQL 11 in GitLab 13.0](https://gitlab.com/groups/gitlab-org/-/epics/2414).\n\n### Bug fixes\nWe've also discovered and fixed a number of bugs in the process, such as [failing to synchronize uploads with missing mount points](https://gitlab.com/gitlab-org/gitlab/-/issues/209752), [invalid ActiveRecord operations](https://gitlab.com/gitlab-org/gitlab/-/issues/210589), and [excessively re-synchronizing files in some situations](https://gitlab.com/gitlab-org/gitlab/-/issues/207808).\n\n## What's next?\nWe are already providing value to our customers by enabling Geo on staging because the Geo team can test and validate Geo at scale at lot easier. Next up is enabling [automatic runs of our end-to-end test on staging](https://gitlab.com/gitlab-org/quality/team-tasks/issues/385), which would reduce the manual testing burden even further. There are also some other improvements, such as [enabling high-availability configurations of PostgreSQL using Patroni on Geo nodes](https://gitlab.com/groups/gitlab-org/-/epics/2536) that we would like to test on staging.\n\nEven though enabling Geo on staging is already very useful, it is just a step forward to rolling out Geo on GitLab.com in production. 
We are currently evaluating the business case for enabling Geo on GitLab.com as part of our disaster recovery strategy and for geo replication.\n\nCover image by [Donald Giannatti](https://unsplash.com/photos/4qk3nQI3WHY) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[677,9,1296],{"slug":2279,"featured":6,"template":680},"geo-is-available-on-staging-for-gitlab-com","content:en-us:blog:geo-is-available-on-staging-for-gitlab-com.yml","Geo Is Available On Staging For Gitlab Com","en-us/blog/geo-is-available-on-staging-for-gitlab-com.yml","en-us/blog/geo-is-available-on-staging-for-gitlab-com",{"_path":2285,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2286,"content":2291,"config":2297,"_id":2299,"_type":14,"title":2300,"_source":16,"_file":2301,"_stem":2302,"_extension":19},"/en-us/blog/get-the-most-out-of-a-ceo-shadow-program",{"title":2287,"description":2288,"ogTitle":2287,"ogDescription":2288,"noIndex":6,"ogImage":2010,"ogUrl":2289,"ogSiteName":667,"ogType":668,"canonicalUrls":2289,"schema":2290},"15 tips to succeed at GitLab's CEO Shadow program","A CEO shadow program can be invigorating, but also intimidating. Here are strategies to help you make the most of the experience.","https://about.gitlab.com/blog/get-the-most-out-of-a-ceo-shadow-program","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"15 tips to succeed at GitLab's CEO Shadow program\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Neil McCorrison\"}],\n        \"datePublished\": \"2021-11-02\",\n      }",{"title":2287,"description":2288,"authors":2292,"heroImage":2010,"date":2294,"body":2295,"category":787,"tags":2296},[2293],"Neil McCorrison","2021-11-02","\n\nYou may already know that GitLab offers an incredible thing called the CEO Shadow program where anyone in the company is able to spend time with CEO and co-founder [Sid Sijbrandij](/company/team/#sytses). 
It's an opportunity to get a behind-the-scenes look at how our company functions. \n\nThere is a lot of [information](https://www.youtube.com/c/GitLabUnfiltered/search?query=ceo%20shadow) available about the program. But for anyone considering a CEO shadow program, either at [GitLab](/handbook/ceo/shadow/#alumni) or another company, here are 15 pieces of advice to get the most out of the experience.\n\n## 1. Take lots of notes\n\nI took copious notes in a separate document because there were so many interesting things that happened - things that I want to remember, follow up on and learn more about. I've heard other shadows have had a notepad handy. Also consider how to further leverage recordings. Even in a normally unrecorded session (like a 1:1), Sid may start a recording to capture fidelity above what notes alone provide. It's an amazing trick. \n\n## 2. Be open to anything\n\nSid asked about his presentation [energy score](https://blog.energybroker.ie/whats-your-personal-energy-rating), danced the [cabbage patch](https://en.wikipedia.org/wiki/Cabbage_Patch_(dance)) during a 1:1 to celebrate a win, and was able to match everyone's energy in a discussion. So make sure to bring that energy! Also, did you know we have a [songbook](/company/culture/songbook/) that everyone can contribute to?\n\n## 3. Make the most of breaks\n\nTen-hour days of back-to-back meetings are no joke. Take the [time to refresh](/handbook/ceo/shadow/#tips-for-remote-shadows) during the day when you have breaks. \n\n## 4. You won't be alone\n\nAt GitLab, you are one of two active shadows following the [\"see one, teach one\" rotation](/handbook/ceo/shadow/#rotation-rhythm). Expect to build a great partnership with your co-shadow.\n\n## 5. Be a good partner\n\nKeep your co-shadow visible in the Zoom gallery view. It’s nice to see body language cues. It’s important to constantly help each other out between note-taking and other tasks. \n\n## 6. 
Everyone can benefit from a coach\n\nSid has a coach to perfect his communication and presentation skills, and others can benefit from one as well. There are lots of resources available to GitLab employees that are highly recommended, including [Modern Health](/handbook/total-rewards/benefits/modern-health/).\n\n## 7. Keep your communications organized\n\nUtilize the [sidebar sections feature in Slack](/handbook/communication/#organizing-your-slack-sidebar-by-priority). Group the pertinent CEO Shadow channels and team members. You'll want to make sure you stay on top of those messages.\n\n## 8. Tame your schedule management software\n\nIf you have [Clockwise](/handbook/tools-and-tips/other-apps/#clockwise) installed, it will override your status in Slack and pause notifications (the `z` indicator). This can mean missing important messages depending on your configuration. You can disable this by running `cw settings` and pausing the status override.\n\n## 9. MRs are essential\n\nYes, [everything starts with an MR](/handbook/communication/#start-with-a-merge-request): Have a concern, idea or suggestion? It’s going to get more traction if you take a stab at drafting it through an MR first.\n\n## 10. Experiment with your screen layout\n\nNotes on the left or right? Place them at the top of your screen near the camera. It can be easy to sink into taking notes and forget that you are often live on YouTube. Check your video once in a while to check your posture, eye placement and lighting. Don't forget to smile!\n\n## 11. Time-keeping is important \n\nUse the [time-keeping shell script](/handbook/ceo/shadow/#keeping-time) to ensure meetings [end on time](/company/culture/all-remote/meetings/#start-on-time-end-on-time). It’s amazing, simple and something a lot of shadows continue to use after the program. \n\n## 12. Don't overthink taking notes\n\nDon’t try to understand context when someone starts talking. Just try to capture what they said accurately. 
Taking effective notes and having [live doc meetings](/company/culture/all-remote/live-doc-meetings/) are amazing skills that the shadow program will catapult you into. \n\n## 13. Listen carefully\n\nExpect to be asked for questions in or following [Valley meetings](/handbook/ceo/shadow/#valley-meetings). They are a unique opportunity and another great chance to participate.\n\n## 14. Early is the new on-time\n\nYou should always join a meeting a minute or two before. I found that doing this during the shadow program gave me extra time to chat with participants – and often Sid – before the main meeting started. \n\n## 15. Get a true glimpse at our core values\n\nYou'll see [transparency](https://handbook.gitlab.com/handbook/values/#transparency) at work. You'll see that our [E-group](/company/team/e-group/) is full of personable, down-to-earth people who thrive on collaboration. You'll hear iteration mentioned - a lot. Not because it's a buzzword, but because it's a highly effective way to develop software. \n\n> Sid said it best: \"Iteration is one of our super powers. It's super hard to do, but when you get it right, it's super effective. 
It allows you to innovate quickly.\"\n\n**[Join GitLab](/jobs/) and become a CEO Shadow yourself!**\n",[9,810,267],{"slug":2298,"featured":6,"template":680},"get-the-most-out-of-a-ceo-shadow-program","content:en-us:blog:get-the-most-out-of-a-ceo-shadow-program.yml","Get The Most Out Of A Ceo Shadow Program","en-us/blog/get-the-most-out-of-a-ceo-shadow-program.yml","en-us/blog/get-the-most-out-of-a-ceo-shadow-program",{"_path":2304,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2305,"content":2311,"config":2317,"_id":2319,"_type":14,"title":2320,"_source":16,"_file":2321,"_stem":2322,"_extension":19},"/en-us/blog/git-for-business-processes",{"title":2306,"description":2307,"ogTitle":2306,"ogDescription":2307,"noIndex":6,"ogImage":2308,"ogUrl":2309,"ogSiteName":667,"ogType":668,"canonicalUrls":2309,"schema":2310},"How we use Git as the blockchain for process changes","Git can be useful for more than just coding and operations. It can help you run your entire business – here's how we do it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679971/Blog/Hero%20Images/git-blockchain.jpg","https://about.gitlab.com/blog/git-for-business-processes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use Git as the blockchain for process changes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-01-15\",\n      }",{"title":2306,"description":2307,"authors":2312,"heroImage":2308,"date":2314,"body":2315,"category":808,"tags":2316},[2313],"Aricka Flowers","2019-01-15","\n\nGit may have started out as a way to collaborate on code, but there’s no denying that it has crept into the operations side of things. But does it stop there? 
We don’t think so.\n\nJust like [blockchain technology](https://blockgeeks.com/guides/what-is-blockchain-technology/) was originally created for cryptocurrency, but is now seen as a revolutionary way to share, store and update [all kinds of data](https://www.fool.com/investing/2018/04/11/20-real-world-uses-for-blockchain-technology.aspx), we see – and use – Git in much the same way.\n\nIn addition to version controlling code and the environment in which it lives, Git can also be used at a high level to facilitate the way a company actually functions, according to our CEO [Sid Sijbrandij](/company/team/#sytses).\n\nHe says GitLab is a prime example of how it can be done.\n\n## How we use Git to run GitLab, the company\n\n\"We’re not just trying to version our code and operations, we're also trying to version all the processes we have at the company, and we do that for a whole slew of reasons,\" says Sid. \"If you write your processes down, it's easier to change and for someone to propose a change. If it's all stored in people's heads, how are you going to change it? You'll have to create a presentation and make sure everyone reads it. But if it’s written down, it's faster to make a change and you're better able to communicate the context for it.\"\n\n### How Git has helped us to scale\n\nUsing Git to implement procedural changes within the company has helped GitLab shoulder growing pains, thanks to our [handbook](/handbook/).\n\n\"Although we're not a perfect company by any means, we've been able to scale really rapidly, onboard people and get them started with the work they have to do,\" Sid says. \"And I think our handbook and how we describe things is an important part of that. It's exciting to see it grow. 
The handbook is now over 2,000 pages, so people can't read everything anymore, but they can read the parts that are relevant to them, and it's really helping with organizational changes that are happening between different departments.\"\n\nSid admits running a business with Git collaboration can seem like a daunting task, especially for companies that did not start out functioning that way. But he urges business leaders to give the process a chance, pointing to a number of companies that are adopting Git as a way to make procedural changes, including O’Reilly Media and several law firms.\n\n## Two tips for adopting Git to run your business\n\n### 1. Evangelize from the top down\n\n\"First of all, this is super hard. It's unnatural and it requires constant campaigning from the top of the company,\" Sid said. \"The natural state is for all the documentation to get out of date, and for people to send each other emails and PowerPoints about the change they want to make without looking at the rest of the changes.\"\n\n### 2. Make processes easier to change\n\n\"What you frequently find in companies is that there's the official process, and then the process that people really use. You can prevent that by making processes easier to change. The reality is people are changing processes in a company every single day, and they have to make those changes quickly. So the harder you make it, the more diversions there will be between reality and what's in the handbook. Instead, empower everyone in the organization to make those changes and do so quickly. 
That is one of the most important things you can do.\"\n\n\"Our handbook is [Creative Commons](https://creativecommons.org/licenses/by-sa/4.0/), so feel free to use that as a starting point for anything that you do.\" [Tweet us](http://twitter.com/gitlab) if you do borrow from or adapt our handbook – we'd love to hear about it.\n\n[Cover image](https://unsplash.com/photos/mf-o1E7omzk) by [chuttersnap](https://unsplash.com/@chuttersnap) on Unsplash\n{: .note}\n",[811,1297,9,745,723],{"slug":2318,"featured":6,"template":680},"git-for-business-processes","content:en-us:blog:git-for-business-processes.yml","Git For Business Processes","en-us/blog/git-for-business-processes.yml","en-us/blog/git-for-business-processes",{"_path":2324,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2325,"content":2331,"config":2337,"_id":2339,"_type":14,"title":2340,"_source":16,"_file":2341,"_stem":2342,"_extension":19},"/en-us/blog/gitlab-14-modern-devops",{"title":2326,"description":2327,"ogTitle":2326,"ogDescription":2327,"noIndex":6,"ogImage":2328,"ogUrl":2329,"ogSiteName":667,"ogType":668,"canonicalUrls":2329,"schema":2330},"GitLab 14 signals shift to modern DevOps: A DevOps platform with velocity, trust, and visibility","GitLab 14 accelerates modern DevOps, bringing velocity with confidence, built-in security, and visibility into DevOps success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668091/Blog/Hero%20Images/gitlab-version-14-wide.png","https://about.gitlab.com/blog/gitlab-14-modern-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 14 signals shift to modern DevOps: A DevOps platform with velocity, trust, and visibility\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brian Glanz\"}],\n        \"datePublished\": \"2021-06-22\",\n      
}",{"title":2326,"description":2327,"authors":2332,"heroImage":2328,"date":2334,"body":2335,"category":1340,"tags":2336},[2333],"Brian Glanz","2021-06-22","\n\nThe DevOps era began with a big idea – dissolve silos to deliver better software, faster. In the transition from classic software paradigms, DIY DevOps toolchains were built with parts that were never designed to work together. That DIY DevOps era left many trapped in new silos, without visibility, and mired in maintenance. Business outcomes suffered as the potential of DevOps was never fully realized.\n\n\n## The next iteration of DevOps\nThere is a better way to build software. [GitLab 14](/gitlab-14/) delivers modern DevOps with a [complete DevOps platform](/topics/devops-platform/), for a streamlined experience that unleashes the power of DevOps. Over the past year, GitLab has shipped advanced DevOps platform capabilities that enable any team to build and deliver software with velocity, trust, and visibility – no matter their size, industry, or location.  \n\nWith enhancements across the software development lifecycle, GitLab has placed strongly in several market reports across a broad range of areas from [Enterprise Agile Planning](/analysts/gartner-eapt21/) and [Application Security Testing](/analysts/gartner-ast21/) to [Continuous Delivery and Release Automation](/analysts/forrester-cdra20/). Tying it all together with a platform approach is a keystone of the next shift in the DevOps movement. GitLab was named a representative vendor in a market overview of [DevOps platforms](/analysts/gartner-vsdp21/).\n\nAs a “new normal” is taking shape after the pandemic, companies worldwide are coming to grips with what it means to work in hybrid and remote environments. A modern DevOps solution needs to meet the emerging demands for a more flexible workplace. 
GitLab has been a pioneer and champion of remote work for years and was recently [mentioned by Fast Company as a world-changing idea](https://www.fastcompany.com/90624506/world-changing-ideas-awards-2021-general-excellence-finalists-and-honorable-mentions). Having unlocked many of the secrets to remote work success, GitLab stepped up to help others out by shipping a [Remote Work Playbook](/company/culture/all-remote/) and a Coursera course on “[How to Manage a Remote Team](https://www.coursera.org/learn/remote-team-management).” Our all-remote know-how and experience went into the development of GitLab 14 to build capabilities that work wherever you do. \n\n\n## Velocity with confidence\nGitLab 14 enables you to increase development velocity and stay confident with a consistent and efficient developer and operator experience, yielding a more predictable DevOps lifecycle. By using one platform for source code management, continuous integration (CI), continuous delivery (CD), infrastructure as code, security, and beyond, teams are more efficient, collaborative, and productive. Our [2021 Global DevSecOps Survey](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/) shows engineers are happier when they can focus on innovation and adding value than when maintaining integrations – and happy developers attract and retain talent.\n\nOrganizations with a mature DevOps culture know the value of managing configuration as code, IT infrastructure as code, and more, with the same platform and best practices used for application development. In GitLab 14, our [Pipeline Editor](/releases/2021/01/22/gitlab-13-8-released/#the-new-pipeline-editor-makes-cicd-easy-to-use) lowers the barrier to entry for CI/CD while also accelerating power users, with visual authoring and versioning, continuous validation, and pipeline visualization. 
GitLab 14’s [Kubernetes Agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/) enables secure deployment to your cloud-native infrastructure. GitLab 14 also meets customers where they are by supporting GitOps with agent-based and agentless approaches and allows for deployments anywhere, regardless of whether infrastructure is cloud-native.\n\n\n## Visibility into DevOps success\nThe [DevOps Research and Assessment (DORA)](https://www.devops-research.com/research.html) firm’s industry-defining research shows how focused improvement of software delivery performance leads to positive business outcomes like happier customers, greater market share, and increased revenue. Focusing efforts requires measuring four metrics in particular that are highly correlated with business performance. These are deployment frequency, lead time for changes, time to restore service, and change failure rate. \n\nAs a complete DevOps platform, GitLab 14 is uniquely capable of delivering visibility into DevOps with out of the box measurement and visualization of operational metrics, including DORA metrics, that have come to define DevOps maturity. With that visibility comes confidence in the ability to drive both team performance and competitive advantage. \n\nGitLab 14 also takes the key next step toward actionability, with an array of customizable Value Stream Analytics to optimize workflows. Constituent analytics like mean time to merge can uncover bottlenecks such as dysfunction in code review, allowing management to identify the root causes of slowdowns in the DevOps lifecycle, and enabling IT leaders to align with business priorities.\n\n\n## Built-in security\nSecurity without sacrifice – the promise of [DevSecOps](/topics/devsecops/) – is realized with built-in security for platform-driven alignment that decreases exposure, while keeping projects on-time and on-budget. 
In a world where security is everyone’s responsibility, automating processes and policies gives developers and security pros the information they need to meet this responsibility.  \n\nEnforcing security on every commit is a matter of course in GitLab 14’s CI/CD, providing real-time feedback as development is happening. A Semgrep analyzer for application security testing offers access to a global rule registry and customization for policy requirements. Acquisitions of Fuzzit and Peach Tech, and GitLab’s new proprietary browser-based DAST crawler, test modern APIs and Single Page Applications (SPAs) demonstrating innovation to meet requirements of modern DevOps. New vulnerability management capabilities increase visibility, providing the controls and observability needed to protect the software factory and its deliverables.\n\n\n## Everyone can contribute\nGitLab 14 has been built by the company and the community together to advance global adoption of modern DevOps. \n\nThanks to GitLab’s open core model, more than 10,000 merge requests from the wider community have been merged into the product since January 2016. The wider community contributes alongside more than 1,300 GitLab team members, all working remotely from 68 countries. 
GitLab believes in a world where everyone can contribute.\n\nGitLab has more than 30 million estimated registered users, from startups to global enterprises, including Ticketmaster, Jaguar Land Rover, Nasdaq, Dish Network, Comcast, and [more who have shared their stories](/customers/), and who trust GitLab to deliver great software, faster.\n",[1440,9],{"slug":2338,"featured":6,"template":680},"gitlab-14-modern-devops","content:en-us:blog:gitlab-14-modern-devops.yml","Gitlab 14 Modern Devops","en-us/blog/gitlab-14-modern-devops.yml","en-us/blog/gitlab-14-modern-devops",{"_path":2344,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2345,"content":2351,"config":2357,"_id":2359,"_type":14,"title":2360,"_source":16,"_file":2361,"_stem":2362,"_extension":19},"/en-us/blog/gitlab-2018-year-in-review",{"title":2346,"description":2347,"ogTitle":2346,"ogDescription":2347,"noIndex":6,"ogImage":2348,"ogUrl":2349,"ogSiteName":667,"ogType":668,"canonicalUrls":2349,"schema":2350},"2018: GitLab's year in review","Take a look at the highlight reel from 2018 – from landing $100M in funding to welcoming a host of great open source projects to GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670162/Blog/Hero%20Images/happy-holidays-cover.png","https://about.gitlab.com/blog/gitlab-2018-year-in-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2018: GitLab's year in review\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"},{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2019-01-10\",\n      }",{"title":2346,"description":2347,"authors":2352,"heroImage":2348,"date":2354,"body":2355,"category":299,"tags":2356},[784,2353],"Rebecca Dodd","2019-01-10","\n\nIn 2018, we added 289 new team members, raised another round of funding, spread the word about remote work, surpassed 2,000 contributors, welcomed some awesome 
open source projects to GitLab, and shipped 12 releases. It's been a banner year for GitLab, so before diving into 2019, we invite you to peer back through the mists of time at the top events from the past year, according to our community:\n\n- [Product news](#product-news)\n- [Community news](#community-news)\n- [Company news](#gitlab-news)\n\n## Product news\n\n### We announced GitLab Serverless\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Announcing GitLab Serverless 🚀\u003Ca href=\"https://t.co/Iu4GwHsaYK\">https://t.co/Iu4GwHsaYK\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1072521159638482945?ref_src=twsrc%5Etfw\">December 11, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We introduced Meltano\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Hey, data teams! We&#39;re working on a tool just for you. 
Read all about Meltano, from \u003Ca href=\"https://twitter.com/jakecodes?ref_src=twsrc%5Etfw\">@jakecodes\u003C/a> &amp; @tayloramurphy1 ✌️  \u003Ca href=\"https://t.co/egEzILPNzu\">https://t.co/egEzILPNzu\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1024773311367131137?ref_src=twsrc%5Etfw\">August 1, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### You got _really_ excited about the Web IDE\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">👋 Meet the GitLab Web IDE!\u003Ca href=\"https://t.co/vhx2RR1uU6\">https://t.co/vhx2RR1uU6\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1007679206187249664?ref_src=twsrc%5Etfw\">June 15, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We successfully migrated to GCP and have noticed some improvements\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">What&#39;s up with \u003Ca href=\"https://t.co/W0iwxWzEZ8\">https://t.co/W0iwxWzEZ8\u003C/a>? 
I wrote an analysis of \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@GitLab\u003C/a>&#39;s stability and performance since we migrated to \u003Ca href=\"https://twitter.com/googlecloud?ref_src=twsrc%5Etfw\">@googlecloud\u003C/a> in August.\u003Ca href=\"https://t.co/8JvvbVq9wJ\">https://t.co/8JvvbVq9wJ\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/GoogleCloud?src=hash&amp;ref_src=twsrc%5Etfw\">#GoogleCloud\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/Cloud?src=hash&amp;ref_src=twsrc%5Etfw\">#Cloud\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/DevOps?src=hash&amp;ref_src=twsrc%5Etfw\">#DevOps\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/SaaS?src=hash&amp;ref_src=twsrc%5Etfw\">#SaaS\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/performance?src=hash&amp;ref_src=twsrc%5Etfw\">#performance\u003C/a> \u003Ca href=\"https://t.co/L6TWhh2Z0B\">pic.twitter.com/L6TWhh2Z0B\u003C/a>\u003C/p>&mdash; Andrew Newdigate (@suprememoocow) \u003Ca href=\"https://twitter.com/suprememoocow/status/1050467664584462337?ref_src=twsrc%5Etfw\">October 11, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nAND you can check out [all our releases from 2018 (and from all time) over here](/releases/categories/releases/).\n\n## Community news\n\n### GNOME moves to GitLab\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Anyway, I&#39;m proud of \u003Ca href=\"https://twitter.com/gnome?ref_src=twsrc%5Etfw\">@gnome\u003C/a> because we achieved it, we made a huge effort on adapting and will continue doing it because that&#39;s who we are. And with this, I want to announce that the mass migration to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> is now completed! Welcome all to 2018! 
🎉\u003C/p>&mdash; Carlos Soriano (@csoriano1618) \u003Ca href=\"https://twitter.com/csoriano1618/status/1001501640623640577?ref_src=twsrc%5Etfw\">May 29, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Drupal moves to GitLab\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Welcome to the party \u003Ca href=\"https://twitter.com/drupal?ref_src=twsrc%5Etfw\">@drupal\u003C/a>! 🎉 \u003Ca href=\"https://t.co/umLw6YlSTl\">https://t.co/umLw6YlSTl\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1030164542360375296?ref_src=twsrc%5Etfw\">August 16, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Freedesktop.org moves to GitLab\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We get pretty excited when open source projects tell us they’re \u003Ca href=\"https://twitter.com/hashtag/movingtogitlab?src=hash&amp;ref_src=twsrc%5Etfw\">#movingtogitlab\u003C/a>. Welcome, \u003Ca href=\"https://twitter.com/hashtag/freedesktop?src=hash&amp;ref_src=twsrc%5Etfw\">#freedesktop\u003C/a>! 
\u003Ca href=\"https://t.co/oLIfXZb7Va\">https://t.co/oLIfXZb7Va\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1031864994747609088?ref_src=twsrc%5Etfw\">August 21, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Errrrrybody is #movingtogitlab\n\nWell, not _quite_, but 10x the normal daily number is still a big deal 😎\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We&#39;re seeing 10x the normal daily amount of repositories \u003Ca href=\"https://twitter.com/hashtag/movingtogitlab?src=hash&amp;ref_src=twsrc%5Etfw\">#movingtogitlab\u003C/a> \u003Ca href=\"https://t.co/7AWH7BmMvM\">https://t.co/7AWH7BmMvM\u003C/a> We&#39;re scaling our fleet to try to stay up. Follow the progress on \u003Ca href=\"https://t.co/hN0ce379SC\">https://t.co/hN0ce379SC\u003C/a> and \u003Ca href=\"https://twitter.com/movingtogitlab?ref_src=twsrc%5Etfw\">@movingtogitlab\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1003409836170547200?ref_src=twsrc%5Etfw\">June 3, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Y'all had _feelings_ about burnout\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">‣ Don&#39;t go straight to work after you wake up.\u003Cbr>‣ Put \u003Ca href=\"https://twitter.com/hashtag/Slack?src=hash&amp;ref_src=twsrc%5Etfw\">#Slack\u003C/a> notifications on dnd on weekends. 
\u003Cbr>‣ When you notice someone in a different time zone should be asleep, tell them.\u003Ca href=\"https://t.co/zKiytIMXsJ\">https://t.co/zKiytIMXsJ\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/975463098676076544?ref_src=twsrc%5Etfw\">March 18, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### And everyone struggles with Git sometimes\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Git happens! Here&#39;s how to fix it 💅\u003Ca href=\"https://t.co/IMAuDH8j3P\">https://t.co/IMAuDH8j3P\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1058445892464902146?ref_src=twsrc%5Etfw\">November 2, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">How &amp; why to keep your Git commit history clean 💻✨ via \u003Ca href=\"https://twitter.com/Kushal_Pandya?ref_src=twsrc%5Etfw\">@Kushal_Pandya\u003C/a> \u003Ca href=\"https://t.co/HbYv2KsyGQ\">https://t.co/HbYv2KsyGQ\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1006245935675006977?ref_src=twsrc%5Etfw\">June 11, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We celebrated 20 years of open source ❤️\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We&#39;re excited to celebrate the 20th anniversary of open source this year at \u003Ca 
href=\"https://twitter.com/hashtag/OSCON?src=hash&amp;ref_src=twsrc%5Etfw\">#OSCON\u003C/a>! Check out our brief history of OSS ✨ \u003Ca href=\"https://t.co/ox2s1rDS9f\">https://t.co/ox2s1rDS9f\u003C/a> \u003Ca href=\"https://t.co/LIdQtQWeoO\">pic.twitter.com/LIdQtQWeoO\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1018886162851811328?ref_src=twsrc%5Etfw\">July 16, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### And made GitLab Gold free for open source projects and educational institutions 🎉\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Today, we&#39;re excited to announce that GitLab Ultimate and Gold are now free for educational institutions and open source projects 💜\u003Ca href=\"https://t.co/5PA08IYnwM\">https://t.co/5PA08IYnwM\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1004033746897719298?ref_src=twsrc%5Etfw\">June 5, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We celebrated inspiring GitLab users\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Video and blog about my journey from stunting motorcycles to \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a> - and some gushing about my love for \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://t.co/ro73lucF7n\">https://t.co/ro73lucF7n\u003C/a>\u003C/p>&mdash; Leah Petersen (@eccomi_leah) \u003Ca 
href=\"https://twitter.com/eccomi_leah/status/1009894688906792960?ref_src=twsrc%5Etfw\">June 21, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### And there was lots of love for GitLab swag\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003C!-- first tweet -->\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Thanks for the swag \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a>. That&#39;s one reason to contribute 😃 \u003Ca href=\"https://t.co/58Z1PsGTen\">pic.twitter.com/58Z1PsGTen\u003C/a>\u003C/p>&mdash; Amit Rathi (@amittrathi) \u003Ca href=\"https://twitter.com/amittrathi/status/1074562107545272320?ref_src=twsrc%5Etfw\">December 17, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C!-- second tweet -->\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">It’s the first time that I receive a birthday gift from the company that I work for. It’s simple, small and modest... but it goes a long way. Thanks \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> I love them 😍👍🎉 \u003Ca href=\"https://t.co/AMCUdQevFu\">pic.twitter.com/AMCUdQevFu\u003C/a>\u003C/p>&mdash; Matej Latin (@matejlatin) \u003Ca href=\"https://twitter.com/matejlatin/status/1039473209291231232?ref_src=twsrc%5Etfw\">September 11, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">When it comes to swag, \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> has raised the bar. This is an actual wooden pin. 
More pins as swag please 😬 cheers \u003Ca href=\"https://twitter.com/samdbeckham?ref_src=twsrc%5Etfw\">@samdbeckham\u003C/a> \u003Ca href=\"https://t.co/bcZtvqAjPE\">pic.twitter.com/bcZtvqAjPE\u003C/a>\u003C/p>&mdash; Sophie Koonin (@type__error) \u003Ca href=\"https://twitter.com/type__error/status/1058105160176726017?ref_src=twsrc%5Etfw\">November 1, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Thank you to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> for the swag! Dear participants, we have many things for you 😁 \u003Ca href=\"https://t.co/9BINX4UbLD\">pic.twitter.com/9BINX4UbLD\u003C/a>\u003C/p>&mdash; TechForum eXplore (@TeXWL) \u003Ca href=\"https://twitter.com/TeXWL/status/1011652998953611268?ref_src=twsrc%5Etfw\">June 26, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">First package of swag for our CI/CD \u003Ca href=\"https://twitter.com/hashtag/Hackathon?src=hash&amp;ref_src=twsrc%5Etfw\">#Hackathon\u003C/a> arrived - yes: there will be \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> socks! 49 people RSVPed so far - it‘s gonna be epic! 
via \u003Ca href=\"https://twitter.com/MeetupDE?ref_src=twsrc%5Etfw\">@MeetupDE\u003C/a> \u003Ca href=\"https://t.co/fZtBd7VZRi\">https://t.co/fZtBd7VZRi\u003C/a> \u003Ca href=\"https://t.co/qyLbTeZN2t\">pic.twitter.com/qyLbTeZN2t\u003C/a>\u003C/p>&mdash; Michael Lihs (@kaktusmimi) \u003Ca href=\"https://twitter.com/kaktusmimi/status/970199201320665088?ref_src=twsrc%5Etfw\">March 4, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## GitLab news\n\n### We announced Series D funding and joined the 🦄 club\n\nIn September we [announced $100 million in Series D funding](/blog/announcing-100m-series-d-funding/), led by ICONIQ Capital. This brought our valuation to over $1 billion, and we couldn't be more excited to use this momentum to become best-in-class in every DevOps software category, from planning to monitoring.\n\n### We made #44 on the Inc. 5000 list\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">ICYMI: We made number 44 on the \u003Ca href=\"https://twitter.com/hashtag/inc5000?src=hash&amp;ref_src=twsrc%5Etfw\">#inc5000\u003C/a> list of 2018&#39;s fastest-growing companies 🎉: \u003Ca href=\"https://t.co/x3jBqItfVK\">https://t.co/x3jBqItfVK\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1032201460946268160?ref_src=twsrc%5Etfw\">August 22, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We hung out IRL in beautiful Cape Town\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Good morning from Cape Town! 
It&#39;s Day 6 of the GitLab Summit 😍 \u003Ca href=\"https://t.co/WHvaSnKHM4\">pic.twitter.com/WHvaSnKHM4\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1034402765974450176?ref_src=twsrc%5Etfw\">August 28, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We made #17 on YC's 2018 Top Companies list\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Feeling proud of everyone \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@GitLab\u003C/a>: as of October 2018, we are number 17 of the \u003Ca href=\"https://twitter.com/ycombinator?ref_src=twsrc%5Etfw\">@YCombinator\u003C/a> Top Companies List, among companies like Airbnb, Stripe, Dropbox, Reddit and Twitch \u003Ca href=\"https://t.co/UQZCaBAUeJ\">https://t.co/UQZCaBAUeJ\u003C/a> \u003Ca href=\"https://t.co/YUJbDhRSyq\">pic.twitter.com/YUJbDhRSyq\u003C/a>\u003C/p>&mdash; Pedro MS (@PedroMScom) \u003Ca href=\"https://twitter.com/PedroMScom/status/1068146315404763139?ref_src=twsrc%5Etfw\">November 29, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### And we debuted some 🔥 integrations\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Apple just announced Xcode 10 is now integrated with GitLab \u003Ca href=\"https://t.co/eQbtiY4IYm\">pic.twitter.com/eQbtiY4IYm\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1003764673454342144?ref_src=twsrc%5Etfw\">June 4, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" 
charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We’re so excited to announce the new GKE integration for GitLab! Now you’re just moments away from a scalable development environment. \u003Ca href=\"https://t.co/4RRVOXlrwz\">https://t.co/4RRVOXlrwz\u003C/a> \u003Ca href=\"https://t.co/RSWwZDSPup\">pic.twitter.com/RSWwZDSPup\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/981916957527044096?ref_src=twsrc%5Etfw\">April 5, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\nThat's all for now! We can't wait to see what 2019 will bring 🌟 As always, come hang out and tweet us your thoughts [@gitlab](https://twitter.com/gitlab).\n",[267,9,675,745],{"slug":2358,"featured":6,"template":680},"gitlab-2018-year-in-review","content:en-us:blog:gitlab-2018-year-in-review.yml","Gitlab 2018 Year In Review","en-us/blog/gitlab-2018-year-in-review.yml","en-us/blog/gitlab-2018-year-in-review",{"_path":2364,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2365,"content":2371,"config":2377,"_id":2379,"_type":14,"title":2380,"_source":16,"_file":2381,"_stem":2382,"_extension":19},"/en-us/blog/gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse",{"title":2366,"description":2367,"ogTitle":2366,"ogDescription":2367,"noIndex":6,"ogImage":2368,"ogUrl":2369,"ogSiteName":667,"ogType":668,"canonicalUrls":2369,"schema":2370},"GitLab adds further measures to combat credential stuffing and other types of platform abuse","Integration of fraud detection and prevention tool into authentication flow increases risk 
reduction.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671606/Blog/Hero%20Images/workflow-tips-security-quality-cover.jpg","https://about.gitlab.com/blog/gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab adds further measures to combat credential stuffing and other types of platform abuse\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Monmayuri Ray\"}],\n        \"datePublished\": \"2022-08-19\",\n      }",{"title":2366,"description":2367,"authors":2372,"heroImage":2368,"date":2374,"body":2375,"category":720,"tags":2376},[2373],"Monmayuri Ray","2022-08-19","\n\nWith an observed increase in credential stuffing attacks, we at GitLab send periodic reminders to users to [enable multifactor authentication](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html), which helps to reduce this type of attack but does not entirely eliminate it. Since MFA is a choice per user discretion, we have some users who have not enabled MFA.\n[Credential stuffing attacks](https://owasp.org/www-community/attacks/Credential_stuffing) are particularly threatening because they are a popular method by which scammers take over users’ accounts, at scale.\n \nTo further reduce the threat of credential stuffing attacks on GitLab.com, the anti-abuse team at GitLab implemented additional protections when users authenticate. We contracted with fraud prevention and account security firm Arkose Labs to integrate [Arkose Protect](https://www.arkoselabs.com/arkose-protect/) into the user login flow to validate sessions before allowing successful login. 
This initiative was prioritized as part of a rapid action process where there was collaboration among various teams, engineers, and Arkose Labs for the implementation to go live on April 29, 2022.\n\nThe rapid action implementation reduces the risk of account takeover for GitLab.com users, while also reducing spam and crypto mining abuse of our users' projects.\n\n## How this risk reduction works\n\nWe look into several checks within the authentication flow, which include change in IP address, user activity, and failed login attempts for Arkose Labs to evaluate the risk of the session. The risk score is based on a multi-classification machine learning model of “high”, “medium”, “low”.\n\nIf the risk is rated low, the user is allowed to proceed to authenticate and has the same experience they had previously. Approximately 10% of the time the risk is higher. In that case, the user must complete an enhanced CAPTCHA from Arkose Labs before they are allowed to authenticate. Based on the feedback data, the score system is also adjusted and learns from reported false positives and false negatives.\n\nThe flow:\n\n![the flow](https://about.gitlab.com/images/blogimages/credentialstuffing3.png){: .shadow}\n\nImplementing these security controls reduces the risk of automated password guessing while also reducing automated account registrations that, as mentioned above, are used by some attackers to spam or do crypto mining. The reduction in abuse has been significant: Accounts blocked by automation and manually by our trust and safety team members were reduced by more than 40% as a result of these new features.\n\n## The future\n\nThe anti-abuse team is planning future work to further reduce abuse of our platform while minimizing the impact on legitimate users when they register for an account, authenticate, and use features that are sometimes abused (such as CI jobs being abused to do crypto mining). 
For example, we plan to have a holistic user scoring engine that can provide a trust score based on every activity. \n\nLearn more about [how GitLab works with Arkose Protect](https://docs.gitlab.com/ee/integration/arkose.html).\n\n",[1440,9,720],{"slug":2378,"featured":6,"template":680},"gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse","content:en-us:blog:gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse.yml","Gitlab Adds Further Measures To Combat Credential Stuffing And Other Types Of Platform Abuse","en-us/blog/gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse.yml","en-us/blog/gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse",{"_path":2384,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2385,"content":2391,"config":2397,"_id":2399,"_type":14,"title":2400,"_source":16,"_file":2401,"_stem":2402,"_extension":19},"/en-us/blog/gitlab-auto-devops-in-action",{"title":2386,"description":2387,"ogTitle":2386,"ogDescription":2387,"noIndex":6,"ogImage":2388,"ogUrl":2389,"ogSiteName":667,"ogType":668,"canonicalUrls":2389,"schema":2390},"GitLab Auto DevOps in action","See how the only single application for the entire DevOps lifecycle helps you deliver better software, faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664015/Blog/Hero%20Images/laptop.jpg","https://about.gitlab.com/blog/gitlab-auto-devops-in-action","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Auto DevOps in action\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-08-10\",\n      }",{"title":2386,"description":2387,"authors":2392,"heroImage":2388,"date":2393,"body":2394,"category":743,"tags":2395},[2313],"2018-08-10","\n\nBetter and faster. 
These two words best describe the production goals of the IT leaders and engineers building today’s cutting-edge software. And GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) can help them hit those goals while improving their overall business outcomes.\n\nAs the only single application for the complete [DevOps](/topics/devops/) lifecycle, GitLab Auto DevOps gives development teams all the tools they need to deliver secure, high-quality software at previously unattainable speeds. The secret sauce that makes Auto DevOps so effective is the way it automatically sets up the required integrations and pipeline needed to get your software out of the door faster. With Auto DevOps, your code is automatically tested for quality, scanned for security vulnerabilities and licensing issues, packaged and then set up for monitoring and deployment, leaving engineers with time to place more attention on creating a better product.\n\nThis may all make sense in theory, but as they say, a picture is worth 1,000 words. And it is [rumored](https://idearocketanimation.com/4293-video-worth-1-million-words/?) that video is worth 1.8 million words. With that being said, why not take a look at GitLab Auto DevOps in action? \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4Uo_QP9rSGM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWant to learn more about GitLab Auto DevOps? 
Check out our [documentation](https://docs.gitlab.com/ee/topics/autodevops/), [feature](https://docs.gitlab.com/ee/topics/autodevops/) and [product vision](/direction/) pages.\n\n\nCover photo by [Ash Edmonds](https://unsplash.com/photos/Koxa-GX_5zs) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n",[1440,9,722,723,720,2396,677],"production",{"slug":2398,"featured":6,"template":680},"gitlab-auto-devops-in-action","content:en-us:blog:gitlab-auto-devops-in-action.yml","Gitlab Auto Devops In Action","en-us/blog/gitlab-auto-devops-in-action.yml","en-us/blog/gitlab-auto-devops-in-action",{"_path":2404,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2405,"content":2411,"config":2417,"_id":2419,"_type":14,"title":2420,"_source":16,"_file":2421,"_stem":2422,"_extension":19},"/en-us/blog/gitlab-ci-cd-features-improvements",{"title":2406,"description":2407,"ogTitle":2406,"ogDescription":2407,"noIndex":6,"ogImage":2408,"ogUrl":2409,"ogSiteName":667,"ogType":668,"canonicalUrls":2409,"schema":2410},"GitLab CI/CD's 2018 highlights","We move quickly, always with an eye to the future, but let's take a moment to look back on how GitLab CI/CD has evolved in the past six months.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663779/Blog/Hero%20Images/cicd-2018_blogimage.jpg","https://about.gitlab.com/blog/gitlab-ci-cd-features-improvements","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD's 2018 highlights\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-01-21\",\n      }",{"title":2406,"description":2407,"authors":2412,"heroImage":2408,"date":2414,"body":2415,"category":299,"tags":2416},[2413],"Jason Yavorska","2019-01-21","\nHello everyone, and happy New Year! 
For those who don't know me, my name is [Jason Yavorska](/company/team/#jyavorska) and I've been the product manager of GitLab CI/CD since around the middle of last year. 2018 was a big year for CI/CD improvements in GitLab, and I'm so proud of our team and what we've been able to deliver in partnership with you, our users. Even just looking back on the last six months of improvements, we've delivered a ton of changes that move our vision for CI/CD forward, address important asks from our users, and build the foundation for an amazing 2019.\n\nBelow are a few of the highlights from my time here so far; be sure to let me know in the comments if I missed something that meant a lot to you.\n\n## Access control for GitLab Pages\n\nOne of the most amazing things about working for an open core company like GitLab is that our community of users can play an outsized role in how our product grows and develops, thanks to their always impressive contributions. Last year we introduced [Access control for Pages (11.5)](https://gitlab.com/gitlab-org/gitlab-ce/issues/33422), a feature with 304 👍 that was actually part of our 2019 vision, and was built thanks to a significant community contribution from MVP [Tuomo Ala-Vannesluoma](https://gitlab.com/tuomoa).\n\nThis was not just a great feature, but also highlights how GitLab and community contributors can work together to do amazing things. It came out shortly after I joined as a new product manager here, and it really opened my eyes to the possibilities inherent in working together transparently and openly with our user community. Now I don't think I could ever go back to any other way of working.\n\n## Feature flags\n\nI'm always looking for ways to expand our horizons and bring more great capabilities into the CI/CD space, and the team achieved that last year with [Allow users to create and manage feature flags for their applications (11.4)](https://gitlab.com/gitlab-org/gitlab-ee/issues/6220). 
A major piece of our 2018 vision, feature flags are so important to continuous delivery workflows since they allow you to safely isolate delivering your code to production, from the moment users engage with it, giving you more control and better options when it comes to how and when you deliver software.\n\n![CI/CD feature flags](https://about.gitlab.com/images/blogimages/cicd-feature_flags.png){: .shadow.medium.center}\n\n## Pipelines for merge requests\n\nSometimes, what you do in one year may be valuable on its own, but it also helps establish a foundation for more in the future. A common request from the community last year had been to make pipelines more aware of merge requests, so that at runtime, information such as the target branch, merge request name and ID, and other information was available to the pipeline. In 2018 we introduced [`only/except: merge_requests` for merge request pipelines (11.6)](https://gitlab.com/gitlab-org/gitlab-ce/issues/15310), which created this linkage. One great way to take advantage of this feature already is to use it to only create [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) on merge requests, helping to save money on environments versus creating them for every pipeline.\n\nPerhaps even more exciting than this feature on its own, is that it will continue to evolve and grow into the ability to [Run a pipeline on what the merged result will be](https://gitlab.com/gitlab-org/gitlab-ee/issues/7380). I can already say with confidence that this will be a game changer for teams that want to prioritize keeping their `master` branch green. 
As far as predicting the future outside of GitLab, I'm still accepting merge requests for that 😉\n\n![pipelines for merge requests](https://about.gitlab.com/images/blogimages/cicd-mr_pipelines.png){: .shadow.medium.center}\n\n## Usability improvements for the merge request widget\n\nSpeaking of merge requests, in general the team has made a lot of improvements to how the merge request widget interacts with CI/CD. We added [JUnit XML Test Summary (11.2)](https://gitlab.com/gitlab-org/gitlab-ce/issues/45318), part of our 2018 vision to make testing a more interactive part of the CI pipeline. We also now [Show enhanced information on running deploys (11.5)](https://gitlab.com/gitlab-org/gitlab-ce/issues/25140), and [Link directly to changed pages in Review App (11.5)](https://gitlab.com/gitlab-org/gitlab-ce/issues/33418), which uses [Route Maps](https://docs.gitlab.com/ee/ci/environments/index.html#go-directly-from-source-files-to-public-pages-on-the-environment) to send you directly to the updated content. Both of these changes were welcome improvements that made it much easier to see what was going on, all in one place.\n\n![CI/CD review app link](https://about.gitlab.com/images/blogimages/cicd-reviewapp_link.png){: .shadow.medium.center}\n\n## #movingtogitlab\n\n[#movingtogitlab](https://twitter.com/hashtag/movingtogitlab?src=hash) was an exciting movement in 2018, and I wanted to ensure a great experience for everyone checking us out, even if they were just trying out GitLab CI or other features, and still using GitHub for repositories. One of the challenges that people ran into early on was the way status checks were named by GitLab CI, which didn't play nicely with the way GitHub expected them to work. 
The team was able to introduce [Name status checks consistently to support GitHub-integrated CI workflow (11.5)](https://gitlab.com/gitlab-org/gitlab-ce/issues/53902) as a change to unblock this, ensuring a valuable experience for everyone, even if you weren't ready to go \"all in\" on GitLab yet.\n\n## Stewardship\n\nHere at GitLab, we take [stewardship of open source](/company/stewardship/) seriously. I was very happy to move the `include:` keyword from paid to free, because I know how important it is for CI/CD users to support proper reuse instead of copy-pasted code. [Move \"include external files in .gitlab-ci.yml\" from Starter to Core (11.4)](https://gitlab.com/gitlab-org/gitlab-ce/issues/42861) (with a grand total of 267 👍 on the issue) achieved this, and opened up new doors, not just for avoiding duplication, but also for more secure ways of implementing common workflows by moving compliance, security, and governance job implementation to a centrally controlled location.\n\n## Honorable mentions\n\nThere wasn't enough time to cover everything in this post without making it a mile long, but there are a few other honorable mentions I want to call out:\n\n- [11.2: Manually stopping environments](https://gitlab.com/gitlab-org/gitlab-ce/issues/25388) (with 245 👍 from our users) added the ability to manually stop your environments, such as review apps, instead of only through pipeline automation.\n- [11.3: Improve handling of includes in `.gitlab-ci.yml` to better enable script reuse/templates](https://gitlab.com/gitlab-org/gitlab-ce/issues/51521) introduced a new way to `extend` your job definitions using templates, including from across different files.\n- [11.4: Run jobs only/except when there are changes for a given path or file](https://gitlab.com/gitlab-org/gitlab-ce/issues/19232) (with a whopping 424 👍) gave you the ability to control whether a job runs or not, based on which files were changed.\n- [11.4: Add support for interactive web terminal to Docker 
executor](https://gitlab.com/gitlab-org/gitlab-runner/issues/3467) let you connect an interactive to a build/deploy environment and troubleshoot on the live runner host.\n- [11.4: Add timed deployments to AutoDevOps incremental rollouts](https://gitlab.com/gitlab-org/gitlab-ee/issues/7545) enabled new deployment strategies where the rollout was done over time in phases.\n- [11.5: `parallel` job keyword to speed up pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/21480) added an easy way to run parallel instances of a job without creating duplicate jobs in your `gitlab-ci.yml`.\n- [11.6: Allow pipelines to be deleted by project owners](https://gitlab.com/gitlab-org/gitlab-ce/issues/41875) (265 👍) gave control over removing old and invalid pipelines, as well as those which may have accidentally included sensitive information in the outputs.\n\n## What's next?\n\nOf course, the mission to improve GitLab CI/CD doesn't stop here. We're bringing [Brendan O'Leary](/company/team/#olearycrew) on board as the full-time product manager for CI (what we call the [Verify stage](/stages-devops-lifecycle/verify/)), freeing me up to focus entirely on CD (what we call [Release](/stages-devops-lifecycle/release/)). We're also significantly growing headcount for the engineering teams supporting us. Having full-time product managers and larger teams dedicated to each of these stages is going to allow us to deliver even more amazing things, even faster.\n\nI've touched on a couple points above, but tried to avoid making this a preview of what's coming for CI/CD in 2019. If you're interested in where Brendan and I are headed, you can visit our direction pages for [Verify (CI)](/direction/verify/) and [Release (CD)](/direction/release/), and feel free to reach out to us directly if you'd like to have a conversation – we'd love to chat about your ideas. 
Being a transparent, open core company, we also welcome participation in all of our public issues (which you'll find linked to from the above direction pages). For me, the best part of this job is interacting with you, the users of GitLab, so thank you for that opportunity. Here's to another great year of working together to make the job of delivering software fun and rewarding!\n\n## Just one more thing...\n\nI'd be remiss if I didn't mention how great GitLab is as a place to work. If you're interested in joining our all-remote team, we're constantly growing and looking for great PMs and others to join us. Check out [our jobs page](/jobs/) to learn more. I'd encourage you to apply even if you don't see an exact match – GitLab is great at finding the right fit for the right personality, even if that's not exactly listed on our hiring website. If you're really unsure, feel free to reach out to me directly ([@j4yav](https://twitter.com/j4yav)) and I'll help you get in touch with the right person.\n",[1090,267,677,9,745],{"slug":2418,"featured":6,"template":680},"gitlab-ci-cd-features-improvements","content:en-us:blog:gitlab-ci-cd-features-improvements.yml","Gitlab Ci Cd Features Improvements","en-us/blog/gitlab-ci-cd-features-improvements.yml","en-us/blog/gitlab-ci-cd-features-improvements",{"_path":2424,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2425,"content":2431,"config":2438,"_id":2440,"_type":14,"title":2441,"_source":16,"_file":2442,"_stem":2443,"_extension":19},"/en-us/blog/gitlab-com-artifacts-cdn-change",{"title":2426,"description":2427,"ogTitle":2426,"ogDescription":2427,"noIndex":6,"ogImage":2428,"ogUrl":2429,"ogSiteName":667,"ogType":668,"canonicalUrls":2429,"schema":2430},"GitLab.com CI artifacts to use Google Cloud CDN","GitLab CI users might benefit from faster downloads from edge caches closest to the user's 
location.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663009/Blog/Hero%20Images/ESA_case_study_image.jpg","https://about.gitlab.com/blog/gitlab-com-artifacts-cdn-change","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab.com CI artifacts to use Google Cloud CDN\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2022-10-25\",\n      }",{"title":2426,"description":2427,"authors":2432,"heroImage":2428,"date":2434,"body":2435,"category":675,"tags":2436},[2433],"Stan Hu","2022-10-25","\n\nOver the next month and going forward, requests for GitLab CI artifacts downloads may be redirected\nto [Google Cloud CDN](https://cloud.google.com/cdn) instead of\n[Google Cloud Storage](https://cloud.google.com/storage). We anticipate that GitLab CI users may benefit from faster\ndownloads from edge caches closest to your location.\n\n**Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\n## How will this work?\n\nCurrently when a CI runner or other client [downloads a CI artifact](https://docs.gitlab.com/ee/api/job_artifacts.html),\nGitLab.com responds with a 302 redirect to a time-limited, pre-signed URL with a domain of `storage.googleapis.com`.\n\nAfter this change, the domain will change to `cdn.artifacts.gitlab-static.net`.\n\nThe exception is for requests originating from within the Google Cloud\nPlatform. 
These will continue to be redirected to Cloud Storage.\n\n## When will this change occur?\n\nWe expect to start the transition around the end of October 2022. This will be a\ngradual transition using a percentage-based rollout, so we anticipate that you will see\nan increasing number of your requests redirected to Google Cloud\nCDN instead of Google Cloud Storage until all of the requests are served by the\nformer.\n\nYou can follow along with the progress of this initiative and raise any\nquestions in [this issue](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7894). We\nwill post more detailed timelines in that issue as we refine the rollout\nplan.\n\n## How does this change impact you?\n\nSince GitLab CI runners and certain clients automatically handle URL\nredirections already, we expect that downloads for CI artifacts should\ncontinue to work without any action.\n\nWe encourage upgrading to the latest version of the GitLab Runner in\norder to take advantage of the CDN. This feature was [introduced in\nGitLab Runner v13.1.0](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/2115).\nIf a runner cannot download from the CDN host, it will retry without the\nCDN and download the artifact directly through GitLab.com.\n\nHowever, if you have a firewall that only allows\n`storage.googleapis.com`, you will need to add\n`cdn.artifacts.gitlab-static.net` (34.110.204.38) to the allow list.\n\n### What do these warning messages mean?\n\nWith this change, users may see warning messages in the CI job logs:\n\n#### read: connection reset by peer\n\n```plaintext\nERROR: Downloading artifacts from coordinator... error couldn't execute GET against https://gitlab.com/api/v4/jobs/\u003Cjob id>/artifacts?direct_download=true: Get \"https://cdn.artifacts.gitlab-static.net/...\nread tcp 172.17.0.2:59332->34.110.204.38:443: read: connection reset by peer  id=1234 token=\u003Csome token>\nWARNING: Retrying...                                
error=invalid argument\nDownloading artifacts from coordinator... ok        id=1234 responseStatus=200 OK token=\u003Csome token>\n```\n\nThis error suggests the runner was not able to access the CDN. Check\nyour network firewalls and allow access to the IP 34.110.204.38.\n\nNote that there are two `Downloading artifacts from coordinator`\nmessages. The second attempt succeeded because the runner retried\nwithout the CDN.\n\n#### x509: certificate signed by unknown authority\n\n```plaintext\nERROR: Downloading artifacts from coordinator... error couldn't execute GET against https://gitlab.com/api/v4/jobs/\u003Cjob id>/artifacts?direct_download=true: Get \"https://storage.googleapis.com/gitlab-gprd-artifacts/...: x509: certificate signed by unknown authority  id=1234 token=\u003Csome token>\n```\n\nIf you see this error with a Windows runner, upgrade to v15.5.0 since it\nis compiled with [Go 1.18](https://tip.golang.org/doc/go1.18), which\nsupports [using the system certificate pool](https://github.com/golang/go/issues/16736).\n\nOtherwise, this error suggests the runner is configured with [custom SSL certificates](https://docs.gitlab.com/runner/configuration/tls-self-signed.html).\nYou may need to update your certificates or include the certificates directly in the bundle.\n\n#### Authentication required\n\nSome clients may report a 401 error with `Authentication required` after\nrequesting to download a job artifact:\n\n```xml\n\u003C?xml version='1.0' encoding='UTF-8'?>\u003CError>\u003CCode>AuthenticationRequired\u003C/Code>\u003CMessage>Authentication required.\u003C/Message>\u003C/Error>\n```\n\nThis error message suggests the HTTP client is following the 302\nredirect and sending the `Authorization` header with the redirected\nURL. This is a known issue with Java HTTP clients.\n\nUpdate your client to drop the `Authorization` header the\nredirect. 
Google Cloud Storage ignores this header if it were set, but\nCloud CDN rejects requests that have the `Authorization` header set.\n",[1440,9,2437,231,1090],"customers",{"slug":2439,"featured":6,"template":680},"gitlab-com-artifacts-cdn-change","content:en-us:blog:gitlab-com-artifacts-cdn-change.yml","Gitlab Com Artifacts Cdn Change","en-us/blog/gitlab-com-artifacts-cdn-change.yml","en-us/blog/gitlab-com-artifacts-cdn-change",{"_path":2445,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2446,"content":2452,"config":2458,"_id":2460,"_type":14,"title":2461,"_source":16,"_file":2462,"_stem":2463,"_extension":19},"/en-us/blog/gitlab-com-container-registry-cdn-change",{"title":2447,"description":2448,"ogTitle":2447,"ogDescription":2448,"noIndex":6,"ogImage":2449,"ogUrl":2450,"ogSiteName":667,"ogType":668,"canonicalUrls":2450,"schema":2451},"GitLab.com Container Registry to use Google Cloud CDN","The GitLab.com Container Registry will now interface with the Google Cloud Content Delivery Network","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670766/Blog/Hero%20Images/container-reg-cdn-blog.jpg","https://about.gitlab.com/blog/gitlab-com-container-registry-cdn-change","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab.com Container Registry to use Google Cloud CDN\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2022-01-13\",\n      }",{"title":2447,"description":2448,"authors":2453,"heroImage":2449,"date":2455,"body":2456,"category":299,"tags":2457},[2454],"Darren Eastman","2022-01-13","\n\nIn January 2022, we are working on implementing a change to the Container Registry on GitLab.com. The GitLab Container Registry will now interface with the Google Cloud Content Delivery Network [CDN](https://cloud.google.com/cdn) to optimize costs and improve performance. 
When implemented, the system will redirect download requests for blobs stored in the GitLab Container Registry to Google Cloud CDN instead of Google Cloud Storage, as is the case today. We expect GitLab CI users to benefit from faster image downloads for those image layers retrieved from edge caches closest to your location.\n\n**Disclaimer** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\n## How will this work?\n\nAuthorized requests for [downloading a blob](https://docs.docker.com/registry/spec/api/#pulling-a-layer) and [checking if a blob exists](https://docs.docker.com/registry/spec/api/#existing-layers) in the [GitLab.com Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry) will be redirected to the Google Cloud CDN at `cdn.registry.gitlab-static.net`. So far, these requests were redirected to Google Cloud Storage at `storage.googleapis.com`.\n\nThe exception is for requests originating from within the Google Cloud Platform. These will continue to be redirected to Cloud Storage.\n\n## When will this change occur?\n\nWe expect to start the transition in late January 2022. This will be a gradual transition using a percentage-based rollout, so you can expect an increasing number of your requests to be redirected to Google Cloud CDN instead of Google Cloud Storage until all of them are served by the former.\n\nYou can follow along with the progress of this initiative and raise any questions in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/350048). 
We will post more detailed timelines in that issue as we refine the rollout plan.\n\n## How does this change impact you?\n\nSince most client tools, such as the Docker CLI, handle redirections automatically, this change will be imperceptible for most users on GitLab.com.\n\nHowever, if you are allow listing `storage.googleapis.com`, you will need to add `cdn.registry.gitlab-static.net` to the allow list as well. Please keep both endpoints on your allow list for the time being, as the transition will be gradual. There will be another blog post once the transition is complete.\n\n\nCover image by [Pat Kay](https://unsplash.com/photos/3d7DTnuNj6E) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1440,9,2437,231],{"slug":2459,"featured":6,"template":680},"gitlab-com-container-registry-cdn-change","content:en-us:blog:gitlab-com-container-registry-cdn-change.yml","Gitlab Com Container Registry Cdn Change","en-us/blog/gitlab-com-container-registry-cdn-change.yml","en-us/blog/gitlab-com-container-registry-cdn-change",{"_path":2465,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2466,"content":2471,"config":2477,"_id":2479,"_type":14,"title":2480,"_source":16,"_file":2481,"_stem":2482,"_extension":19},"/en-us/blog/gitlab-com-container-registry-update",{"title":2467,"description":2468,"ogTitle":2467,"ogDescription":2468,"noIndex":6,"ogImage":2010,"ogUrl":2469,"ogSiteName":667,"ogType":668,"canonicalUrls":2469,"schema":2470},"Announcing an exciting update to the GitLab.com Container Registry","A new version of our Container Registry is coming with improvements we're excited about. 
Here's what you need to know.","https://about.gitlab.com/blog/gitlab-com-container-registry-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing an exciting update to the GitLab.com Container Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2021-10-25\",\n      }",{"title":2467,"description":2468,"authors":2472,"heroImage":2010,"date":2474,"body":2475,"category":675,"tags":2476},[2473],"Tim Rizzi","2021-10-25","\n\nIn the coming weeks, we are planning to roll out a new version of the Container Registry on GitLab.com. Prior to deploying this major update, we wanted to clearly communicate the planned changes, what to expect, and why we are excited about this update. \n\nIf you have any questions or concerns, please don’t hesitate to comment in the [epic](https://gitlab.com/groups/gitlab-org/-/epics/5523). \n\n## Context \n\nIn [milestone 8.8](/releases/2016/05/22/gitlab-8-8-released/), GitLab launched the MVC of the Container Registry. This feature integrated the Docker Distribution registry into GitLab so that any GitLab user could have a space to publish and share container images. \n\nBut there was an inherent limitation with Docker Distribution as all metadata associated with a given image/tag was stored in the storage backend. This made using that metadata to build API features like storage usage visibility and sorting and filtering unfeasible. With the most recent update to the Container Registry, we’ve added a new metadata database that will store all of the metadata in Postgres instead of the storage backend. 
This will allow us to unblock many of the features that you’ve been asking for.\n\n## Why we are excited \n\n- [Storage visibility for the container registry](https://gitlab.com/groups/gitlab-org/-/epics/7225)\n- Performance improvements for list operations when using the GitLab API and UI\n- [Redesign of the UI](https://gitlab.com/groups/gitlab-org/-/epics/3211), including\n  - [Build and commit metadata for tags built via CI](https://gitlab.com/gitlab-org/gitlab/-/issues/197996)\n  - [Search by tag name](https://gitlab.com/gitlab-org/gitlab/-/issues/255614)\n  \n## The plan \n\nWe're planning a phased migration, starting with newly-created repositories. We'll roll this out incrementally to maintain safety for those customers and provide our team with an opportunity to identify and address any concerns. \n\n## Timing \n\nWe're starting the percentage-based rollout on October 26th, 2021, with GitLab internal projects' customers with less usage, which we expect to take 4 to 6 weeks. For more information about the planned, percentage-based rollout, please refer to this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6426). \n\nOnce we complete that work, we’ll switch to customers who heavily use the Container Registry for new repositories. \n\n## FAQ \n\n- You mentioned new image repositories, but what about existing image repositories? \n  - The migration of newly-created repositories is phase 1 of this project. Once complete, we have some planned development work and then will begin to schedule the migration of existing repositories. Please stay tuned or follow along in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/5523) for more information. \n- Do I need to do anything?\n  - No, the process is fully automated. \n- Is there anything I can do to help? \n  - Yes! 
Although no action is necessary, we recommend activating the Container Registry [cleanup policies](https://docs.gitlab.com/ee/user/packages/container_registry/#cleanup-policy) for any relevant projects. This will make [phase 2](https://gitlab.com/groups/gitlab-org/-/epics/6427) of the migration much faster. \n- Is the update required? \n  - Yes. This change will allow us to deliver a more modern and scalable product and you don’t want to miss out on those features.\n- Will there be any downtime?\n  - For phase 1 of the migration, which will focus on new image repositories, there is no expected downtime. \n- How can we learn more about phase 2? \n  - Right now we are focused on phase 1, but please feel free to ask any questions you may have in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6427).\n",[1440,9,231],{"slug":2478,"featured":6,"template":680},"gitlab-com-container-registry-update","content:en-us:blog:gitlab-com-container-registry-update.yml","Gitlab Com Container Registry Update","en-us/blog/gitlab-com-container-registry-update.yml","en-us/blog/gitlab-com-container-registry-update",{"_path":2484,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2485,"content":2490,"config":2495,"_id":2497,"_type":14,"title":2498,"_source":16,"_file":2499,"_stem":2500,"_extension":19},"/en-us/blog/gitlab-com-paid-features",{"title":2486,"description":2487,"ogTitle":2486,"ogDescription":2487,"noIndex":6,"ogImage":2010,"ogUrl":2488,"ogSiteName":667,"ogType":668,"canonicalUrls":2488,"schema":2489},"Introducing exclusive features to GitLab.com Bronze, Silver and Gold plans","New features are coming exclusively to GitLab.com paid plans – find out why and how to upgrade.","https://about.gitlab.com/blog/gitlab-com-paid-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing exclusive features to GitLab.com Bronze, Silver and Gold plans\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2017-09-01\",\n      }",{"title":2486,"description":2487,"authors":2491,"heroImage":2010,"date":2492,"body":2493,"category":299,"tags":2494},[950],"2017-09-01","\n\nAs of today, we're making some changes to our GitLab.com subscription plans, with some exclusive features included in Bronze, Silver and Gold plans.\n\n\u003C!-- more -->\n\nHere's how the changes will affect:\n- [existing Silver and Gold plan users](#how-will-these-changes-affect-existing-silver-or-gold-plan-users)\n- [existing Bronze plan users](#how-will-it-affect-existing-bronze-plan-users)\n- [existing Free plan users](#how-will-it-affect-existing-free-plan-users)\n\nAt GitLab, we're committed to providing an integrated solution that supports the entire software development lifecycle at a price where everyone can contribute. We also want to keep improving and adding new features to GitLab.com. Earlier this year we [introduced paid subscriptions](/blog/introducing-subscriptions-on-gitlab-dot-com/) to help us do just that. Initially all Silver plan features were temporarily available to all Free and Bronze plan users. But as of today, each plan will now only have the correct features associated with its plan level. **Public projects will still have free access to all features and unlimited CI/CD**, as part of our continued commitment to open-source software.\n\n## FAQ\n\n### What's changed in the GitLab.com subscription plans?\n\nWe've introduced exclusive paid features to GitLab.com's Bronze, Silver, and Gold plans. Starting on September 1st, all new GitLab.com accounts will only have access to the features outlined in their plan. See our [GitLab.com pricing page](/pricing/#gitlab-com) for information on what features are available in each plan. 
We'll continue to add features to the plans with each new release.\n\n### How will these changes affect existing Silver or Gold plan users?\n\nThis change has no effect on teams/individuals who purchased the Silver or Gold GitLab.com plans.\n\n### How will it affect existing Free plan users?\n\nFor existing users on the Free plan, we've created a special Early Adopter Plan for you. This plan has all of the existing features available in our Silver plan, with the exception of additional CI minutes or premium support. Any group or user account created before September 1st will be put onto this plan for a year for free. While we will not add new paid features to this plan, you'll continue to enjoy powerful features, like multi-project pipelines and canary deployments, for the next year. After 12 months, you will get rolled back to the Free plan. You can upgrade at any time.\n\n### How will it affect existing Bronze plan users?\n\nFor existing users on the Bronze plan, you will continue to have access to Bronze features, but not the Silver features that were previously included. However, we will be adding new Bronze features for you in the coming releases, whereas users on the Early Adopter plan will need to upgrade to enjoy any new features we add in the future.\n\n### What about public projects?\n\nWe're still committed to open-source software, so all paid features are also available to all public projects on GitLab.com.\n\n### What if I want to upgrade my plan today?\n\nFor users that are interested in upgrading their plan, please visit the [GitLab.com pricing](/pricing/#gitlab-com) page and click on the **Buy Now** button. 
The benefit of upgrading your plan today is that you will get access to the upcoming GitLab.com features that will only be available in paid plans.\n\n### What if I have questions or comments about the change?\n\nIf you have questions or feedback about these changes, please let us know by [filling out this feedback form](https://docs.google.com/forms/d/e/1FAIpQLSdr-Top4N4oObYaj_5ShwcVNhysSheSfH_x-r_nENLBeRGtjQ/viewform).\n\n## What's included in the plans\n\n### Free Plan\n\nAt GitLab, we \u003Ci class=\"fas fa-heart\" aria-hidden=\"true\">\u003C/i> free and are committed to offering a free plan with unlimited private repos, unlimited contributors, and access to an end-to-end development solution. This is a great option for personal or small projects.\n\n### Bronze Plan\n\nFor teams that need access to more advanced workflow features like [multiple issue boards](https://docs.gitlab.com/ee/user/project/issue_board.html#multiple-issue-boards), [issue boards with milestones](https://docs.gitlab.com/ee/user/project/issue_board.html#board-with-a-milestone), [multiple approvers](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html), [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html) and more. The Bronze plan also includes next business day [support](/support/).\n\n### Silver Plan\n\nFor teams who need more robust DevOps capabilities. Features include everything in the Free and Bronze plans, plus [multi-project pipeline graphs](https://docs.gitlab.com/ee/ci/multi_project_pipelines.html), [deploy boards](https://docs.gitlab.com/ee/user/project/deploy_boards.html) and [canary deployments](https://docs.gitlab.com/ee/user/project/deploy_boards.html#canary-deployments). Silver plan users get 10,000 CI minutes and 24/7 emergency support.\n\n### Gold Plan\n\nGold users have access to all Free, Bronze and Silver features, plus 50,000 CI pipeline minutes per month on our shared runners. 
This is great for teams with heavy CI/CD usage.\n\nSee [our pricing page](/pricing/#gitlab-com) for a full list of features included in each plan.\n",[675,9],{"slug":2496,"featured":6,"template":680},"gitlab-com-paid-features","content:en-us:blog:gitlab-com-paid-features.yml","Gitlab Com Paid Features","en-us/blog/gitlab-com-paid-features.yml","en-us/blog/gitlab-com-paid-features",{"_path":2502,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2503,"content":2509,"config":2516,"_id":2518,"_type":14,"title":2519,"_source":16,"_file":2520,"_stem":2521,"_extension":19},"/en-us/blog/gitlab-com-stability-post-gcp-migration",{"title":2504,"description":2505,"ogTitle":2504,"ogDescription":2505,"noIndex":6,"ogImage":2506,"ogUrl":2507,"ogSiteName":667,"ogType":668,"canonicalUrls":2507,"schema":2508},"What's up with GitLab.com? Check out the latest data on its stability","Let's take a look at the data on the stability of GitLab.com from before and after our recent migration from Azure to GCP, and dive into why things are looking up.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671280/Blog/Hero%20Images/gitlab-gke-integration-cover.png","https://about.gitlab.com/blog/gitlab-com-stability-post-gcp-migration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What's up with GitLab.com? Check out the latest data on its stability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"}],\n        \"datePublished\": \"2018-10-11\",\n      }",{"title":2504,"description":2505,"authors":2510,"heroImage":2506,"date":2512,"body":2513,"category":743,"tags":2514},[2511],"Andrew Newdigate","2018-10-11","\nThis post is inspired by [this comment on Reddit](https://www.reddit.com/r/gitlab/comments/9f71nq/thanks_gitlab_team_for_improving_the_stability_of/),\nthanking us for improving the stability of GitLab.com. Thanks, hardwaresofton! 
Making GitLab.com\nready for your mission-critical workloads has been top of mind for us for some time, and it's\ngreat to hear that users are noticing a difference.\n\n_Please note that the numbers in this post differ slightly from the Reddit post as the data has changed since that post._\n\nWe will continue to work hard on improving the availability and stability of the platform. Our\ncurrent goal is to achieve 99.95 percent availability on GitLab.com – look out for an upcoming\npost about how we're planning to get there.\n\n## GitLab.com stability before and after the migration\n\nAccording to [Pingdom](http://stats.pingdom.com/81vpf8jyr1h9), GitLab.com's availability for the year to date, up until the migration was **[99.68 percent](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=F2)**, which equates to about 32 minutes of downtime per week on average.\n\nSince the migration, our availability has improved greatly, although we have much less data to compare with than in Azure.\n\n![Availability Chart](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=458170195&format=image)\n\nUsing data publicly available from Pingdom, here are some stats about our availability for the year to date:\n\n| Period                                 | Mean-time between outage events |\n| -------------------------------------- | ------------------------------- |\n| Pre-migration (Azure)                  | **1.3 days**                    |\n| Post-migration (GCP)                   | **7.3 days**                    |\n| Post-migration (GCP) excluding 1st day | **12 days**                     |\n\nThis is great news: we're experiencing outages less frequently. 
What does this mean for our availability, and are we on track to achieve our goal of 99.95 percent?\n\n| Period                    | Availability                                                                                                                   | Downtime per week |\n| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------- |\n| Pre-migration (Azure)     | **[99.68%](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=F2)**  | **32 minutes**    |\n| Post-migration (GCP)      | **[99.88 %](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=B3)** | **13 minutes**    |\n| Target – not yet achieved | **99.95%**                                                                                                                     | **5 minutes**     |\n\nDropping from 32 minutes per week average downtime to 13 minutes per week means we've experienced a **61 percent improvement** in our availability following our migration to Google Cloud Platform.\n\n## Performance\n\nWhat about the performance of GitLab.com since the migration?\n\nPerformance can be tricky to measure. In particular, averages are a terrible way of measuring performance, since they neglect outlying values. One of the better ways to measure performance is with a latency histogram chart. To do this, we imported the GitLab.com access logs for July (for Azure) and September (for Google Cloud Platform) into [Google BigQuery](https://cloud.google.com/bigquery/), then selected the 100 most popular endpoints for each month and categorised these as either API, web, git, long-polling, or static endpoints. 
Comparing these histograms side-by-side allows us to study how the performance of GitLab.com has changed since the migration.\n\n![GitLab.com Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/azure_v_gcp_latencies.gif)\n\nIn this histogram, higher values on the left indicate better performance. The right of the graph is the \"_tail_\", and the \"_fatter the tail_\", the worse the user experience.\n\nThis graph shows us that with the move to GCP, more requests are completing within a satisfactory amount of time.\n\nHere's two more graphs showing the difference for API and Git requests respectively.\n\n![API Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/api-performance-histogram.png)\n\n![Git Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/git-performance-histogram.png)\n\n## Why these improvements?\n\nWe chose Google Cloud Platform because we believe that Google offer the most reliable cloud platform for our workload, particularly as we move towards running GitLab.com in [Kubernetes](/solutions/kubernetes/).\n\nHowever, there are many other reasons unrelated to our change in cloud provider for these improvements to stability and performance.\n\n> #### _“We chose Google Cloud Platform because we believe that Google offer the most reliable cloud platform for our workload”_\n\nLike any large SaaS site, GitLab.com is a large, complicated system, and attributing availability changes to individual changes is extremely difficult, but here are a few factors which may be effecting our availability and performance:\n\n### Reason #1: Our Gitaly Fleet on GCP is much more powerful than before\n\nGitaly is responsible for all Git access in the GitLab application. Before Gitaly, Git access occurred directly from within Rails workers. 
Because of the scale we run at, we require many servers serving the web application, and therefore, in order to share git data between all workers, we relied on NFS volumes. Unfortunately this approach doesn't scale well, which led to us building Gitaly, a dedicated Git service.\n\n> #### _“We've opted to give our fleet of 24 Gitaly servers a serious upgrade”_\n\n#### Our upgraded Gitaly fleet\n\nAs part of the migration, we've opted to give our fleet of 24 [Gitaly](/blog/the-road-to-gitaly-1-0/) servers a serious upgrade. If the old fleet was the equivalent of a nice family sedan, the new fleet are like a pack of snarling musclecars, ready to serve your Git objects.\n\n| Environment | Processor                       | Number of cores per instance | RAM per instance |\n| ----------- | ------------------------------- | ---------------------------- | ---------------- |\n| Azure       | Intel Xeon Ivy Bridge @ 2.40GHz | 8                            | 55GB             |\n| GCP         | Intel Xeon Haswell @ 2.30GHz    | **32**                       | **118GB**        |\n\nOur new Gitaly fleet is much more powerful. This means that Gitaly can respond to requests more quickly, and deal better with unexpected traffic surges.\n\n#### IO performance\n\nAs you can probably imagine, serving [225TB of Git data](https://dashboards.gitlab.com/d/ZwfWfY2iz/vanity-metrics-dashboard?orgId=1) to roughly half-a-million active users a week is a fairly IO-heavy operation. 
Any performance improvements we can make to this will have a big impact on the overall performance of GitLab.com.\n\nFor this reason, we've focused on improving performance here too.\n\n| Environment | RAID         | Volumes | Media    | filesystem | Performance                                                            |\n| ----------- | ------------ | ------- | -------- | ---------- | ---------------------------------------------------------------------- |\n| Azure       | RAID 5 (lvm) | 16      | magnetic | xfs        | 5k IOPS, 200MB/s (_per disk_) / 32k IOPS **1280MB/s** (_volume group_) |\n| GCP         | No raid      | 1       | **SSD**  | ext4       | **60k read IOPs**, 30k write IOPs, 800MB/s read 200MB/s write          |\n\nHow does this translate into real-world performance? Here are average read and write times across our Gitaly fleet:\n\n##### IO performance is much higher\n\nHere are some comparative figures for our Gitaly fleet from Azure and GCP. In each case, the performance in GCP is much better than in Azure, although this is what we would expect given the more powerful fleet.\n\n[![Disk read time graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=458168633&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172) [![Disk write time graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=884528549&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172) [![Disk Queue length 
graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=2135164979&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172)\n\nNote: For reference: for Azure, this uses the average times for the week leading up to the failover. For GCP, it's an average for the week up to October 2, 2018.\n\nThese stats clearly illustrate that our new fleet has far better IO performance than our old cluster. Gitaly performance is highly dependent on IO performance, so this is great news and goes a long way to explaining the performance improvements we're seeing.\n\n### Reason #2: Fewer \"unicorn worker saturation\" errors\n\n![HTTP 503 Status GitLab](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/facepalm-503.png)\n\nUnicorn worker saturation sounds like it'd be a good thing, but it's really not!\n\nWe ([currently](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/1899)) rely on [unicorn](https://bogomips.org/unicorn/), a Ruby/Rack http server, for serving much of the application. Unicorn uses a single-threaded model, which uses a fixed pool of workers processes. Each worker can handle only one request at a time. If the worker gives no response within 60 seconds, it is terminated and another process is spawned to replace it.\n\n> #### _“Unicorn worker saturation sounds like it'd be a good thing, but it's really not!”_\n\nAdd to this the lack of autoscaling technologies to ramp the fleet up when we experience high load volumes, and this means that GitLab.com has a relatively static-sized pool of workers to handle incoming requests.\n\nIf a Gitaly server experiences load problems, even fast [RPCs](https://en.wikipedia.org/wiki/Remote_procedure_call) that would normally only take milliseconds, could take up to several seconds to respond – thousands of times slower than usual. 
Requests to the unicorn fleet that communicate with the slow server will take hundreds of times longer than expected. Eventually, most of the fleet is handling requests to that affected backend server. This leads to a queue which affects all incoming traffic, a bit like a tailback on a busy highway caused by a traffic jam on a single offramp.\n\nIf the request gets queued for too long – after about 60 seconds – the request will be cancelled, leading to a 503 error. This is indiscriminate – all requests, whether they interact with the affected server or not, will get cancelled. This is what I call unicorn worker saturation, and it's a very bad thing.\n\nBetween February and August this year we frequently experienced this phenomenon.\n\nThere are several approaches we've taken to dealing with this:\n\n- **Fail fast with aggressive timeouts and circuitbreakers**: Timeouts mean that when a Gitaly request is expected to take a few milliseconds, they time out after a second, rather than waiting for the request to time out after 60 seconds. While some requests will still be affected, the cluster will remain generally healthy. Gitaly currently doesn't use circuitbreakers, but we plan to add this, possibly using [Istio](https://istio.io/docs/tasks/traffic-management/circuit-breaking/) once we've moved to Kubernetes.\n\n- **Better abuse detection and limits**: More often than not, server load spikes are driven by users going against our fair usage policies. We built tools to better detect this and over the past few months, an abuse team has been established to deal with this. 
Sometimes, load is driven through huge repositories, and we're working on reinstating fair-usage limits which prevent 100GB Git repositories from affecting our entire fleet.\n\n- **Concurrency controls and rate limits**: For limiting the blast radius, rate limiters (mostly in HAProxy) and concurrency limiters (in Gitaly) slow overzealous users down to protect the fleet as a whole.\n\n### Reason #3: GitLab.com no longer uses NFS for any Git access\n\nIn early September we disabled Git NFS mounts across our worker fleet. This was possible because Gitaly had reached v1.0: the point at which it's sufficiently complete. You can read more about how we got to this stage in our [Road to Gitaly blog post](/blog/the-road-to-gitaly-1-0/).\n\n### Reason #4: Migration as a chance to reduce debt\n\nThe migration was a fantastic opportunity for us to improve our infrastructure, simplify some components, and otherwise make GitLab.com more stable and more observable, for example, we've rolled out new **structured logging infrastructure**.\n\nAs part of the migration, we took the opportunity to move much of our logging across to structured logs. We use [fluentd](https://www.fluentd.org/), [Google Pub/Sub](https://cloud.google.com/pubsub/docs/overview), [Pubsubbeat](https://github.com/GoogleCloudPlatform/pubsubbeat), storing our logs in [Elastic Cloud](https://www.elastic.co/cloud) and [Google Stackdriver Logging](https://cloud.google.com/logging/). Having reliable, indexed logs has allowed us to reduce our mean-time to detection of incidents, and in particular detect abuse. This new logging infrastructure has also been invaluable in detecting and resolving several security incidents.\n\n> #### _“This new logging infrastructure has also been invaluable in detecting and resolving several security incidents”_\n\nWe've also focused on making our staging environment much more similar to our production environment. 
This allows us to test more changes, more accurately, in staging before rolling them out to production. Previously the team was maintaining\na limited scaled-down staging environment and many changes were not adequately tested before being rolled out. Our environments now share a common configuration and we're working to automate all [terraform](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5079) and [chef](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5078) rollouts.\n\n### Reason #5: Process changes\n\nUnfortunately many of the worst outages we've experienced over the past few years have been self-inflicted. We've always been transparent about these — and will continue to be so — but as we rapidly grow, it's important that our processes scale alongside our systems and team.\n\n> #### _“It's important that our processes scale alongside our systems and team”_\n\nIn order to address this, over the past few months, we've formalized our change and incident management processes. These processes respectively help us to avoid outages and resolve them quicker when they do occur.\n\nIf you're interested in finding out more about the approach we've taken to these two vital disciplines, they're published in our handbook:\n\n- [GitLab.com's Change Management Process](/handbook/engineering/infrastructure/change-management/)\n- [GitLab.com's Incident Management Process](/handbook/engineering/infrastructure/incident-management/)\n\n### Reason #6: Application improvement\n\nEvery GitLab release includes [performance and stability improvements](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&state=opened&label_name%5B%5D=performance); some of these have had a big impact on GitLab's stability and performance, particularly n+1 issues.\n\nTake Gitaly for example: like other distributed systems, Gitaly can suffer from a class of performance degradations known as \"n+1\" problems. 
This happens when an endpoint needs to make many queries (_\"n\"_) to fulfill a single request.\n\n> Consider an imaginary endpoint which queried Gitaly for all tags on a repository, and then issued an additional query for each tag to obtain more information. This would result in n + 1 Gitaly queries: one for the initial tag, and then n for the tags. This approach would work fine for a project with 10 tags – issuing 11 requests, but a project with 1000 tags, this would result in 1001 Gitaly calls, each with a round-trip time, and issued in sequence.\n\n![Latency drop in Gitaly endpoints](https://about.gitlab.com../../images/blogimages/whats-up-with-gitlab-com/drop-off.png)\n\nUsing data from Pingdom, this chart shows long-term performance trends since the start of the year. It's clear that latency improved a great deal on May 7, 2018. This date happens to coincide with the RC1 release of GitLab 10.8, and its deployment on GitLab.com.\n\nIt turns out that this was due to a [single fix on n+1 on the merge request page being resolved](https://gitlab.com/gitlab-org/gitlab-ce/issues/44052).\n\nWhen running in development or test mode, GitLab now detects n+1 situations and we have compiled [a list of known n+1s](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=performance&label_name[]=Gitaly&label_name[]=technical%20debt). 
As these are resolved we expect even more performance improvements.\n\n![GitLab Summit - South Africa - 2018](https://about.gitlab.com/images/summits/2018_south-africa_team.jpg)\n\n### Reason #7: Infrastructure team growth and reorganization\n\nAt the start of May 2018, the Infrastructure team responsible for GitLab.com consisted of five engineers.\n\nSince then, we've had a new director join the Infrastructure team, two new managers, a specialist [Postgres DBRE](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/13778), and four new [SREs](https://handbook.gitlab.com/job-families/engineering/infrastructure/site-reliability-engineer/). The database team has been reorganized to be an embedded part of infrastructure group. We've also brought in [Ongres](https://www.ongres.com/), a specialist Postgres consultancy, to work alongside the team.\n\nHaving enough people in the team has allowed us to be able to split time between on-call, tactical improvements, and longer-term strategic work.\n\nOh, and we're still hiring! If you're interested, check out [our open positions](/jobs/) and choose the Infrastructure Team 😀\n\n## TL;DR: Conclusion\n\n1. GitLab.com is more stable: availability has improved 61 percent since we migrated to GCP\n1. GitLab.com is faster: latency has improved since the migration\n1. 
We are totally focused on continuing these improvements, and we're building a great team to do it\n\nOne last thing: our Grafana dashboards are open, so if you're interested in digging into our metrics in more detail, visit [dashboards.gitlab.com](https://dashboards.gitlab.com) and explore!\n",[2515,1296,9,1091,675,1295],"GKE",{"slug":2517,"featured":6,"template":680},"gitlab-com-stability-post-gcp-migration","content:en-us:blog:gitlab-com-stability-post-gcp-migration.yml","Gitlab Com Stability Post Gcp Migration","en-us/blog/gitlab-com-stability-post-gcp-migration.yml","en-us/blog/gitlab-com-stability-post-gcp-migration",{"_path":2523,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2524,"content":2529,"config":2534,"_id":2536,"_type":14,"title":2537,"_source":16,"_file":2538,"_stem":2539,"_extension":19},"/en-us/blog/gitlab-daily-tools",{"title":2525,"description":2526,"ogTitle":2525,"ogDescription":2526,"noIndex":6,"ogImage":690,"ogUrl":2527,"ogSiteName":667,"ogType":668,"canonicalUrls":2527,"schema":2528},"How to improve your daily GitLab experience","Personal tools and tips for a more productive GitLab experience","https://about.gitlab.com/blog/gitlab-daily-tools","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to improve your daily GitLab experience\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2019-11-26\",\n      }",{"title":2525,"description":2526,"authors":2530,"heroImage":690,"date":1898,"body":2532,"category":698,"tags":2533},[2531],"Viktor Nagy","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\u003C!-- Content start here -->\n\nThis is a collection of tools and settings I use to create a more productive GitLab experience. \n\n*Disclaimer: all screenshots are using Firefox's Hungarian language setting.*\n\n## Easy navigation\n\nI use Firefox, but it should work in Chrome too. 
Basically, after bookmarking a website, you can add a `keyword` to it. This allows for quick navigation.\n\n![Firefox bookmarks with keywords](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/firefox-bookmarks.png){: .shadow.medium}\n\nMy keyworded navigation includes the following pages:\n\n- `gl-epics` --> [https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=](https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=)\n- `gl-issues` --> [https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=)\n- `gl-product` --> [https://gitlab.com/groups/gitlab-org/-/boards/1342179?label_name[]=group%3A%3Asystem&search=](https://gitlab.com/groups/gitlab-org/-/boards/1342179?label_name[]=group%3A%3Asystem&search=)\n- `gl-new` --> [https://gitlab.com/gitlab-org/gitlab/issues/new?issuable_template=Problem_Validation&issue[title]=](https://gitlab.com/gitlab-org/gitlab/issues/new?issuable_template=Problem_Validation&issue[title]=)\n\n## Easy search (complex way)\n\nThe quick-links above are nice, but you browser can do even more!\nYou can actually use the above keywords to pass a search query while you navigate to the given page.\n\n![Search with keywords](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/firefox-search.png){: .shadow.medium}\n\nBy writing `gl-new This is a new issue` a \"new issue\" page will open and prefill `This is a new issue` as the title. You can use this pre-fill mechnism to filter the issues, epics lists or a board too.\n\nHow can you achieve this? 
The argument we are passing for `gl-new` can be referenced as `%s` in the final url.\nThis means that my actual bookmarked urls are the following:\n\n- `gl-epics` --> [https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=%s](https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=%s)\n- `gl-issues` --> [https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=%s](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=%s)\n- `gl-product` --> [https://gitlab.com/groups/gitlab-org/-/boards/1342179?label_name[]=group%3A%3Asystem&search=%s](https://gitlab.com/groups/gitlab-org/-/boards/1342179?label_name[]=group%3A%3Asystem&search=%s)\n- `gl-new` --> [https://gitlab.com/gitlab-org/gitlab/issues/new?issuable_template=Problem_Validation&issue[title]=%s](https://gitlab.com/gitlab-org/gitlab/issues/new?issuable_template=Problem_Validation&issue[title]=%s)\n\n*Note:* unfortunately, only simple strings can be searched this way. Adding extra labels does not work.\n\n## Easy search (simple way)\n\nThe above is one way to search different sites easily. You can achieve something similar (without bookmarks) by adding\na new search engine for your browser.\n\n![Add a search engine](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/firefox-search-engine.png){: .shadow.medium}\n\nI have such search engines added for the GitLab documentation and the GitLab handbook. 
You can easily add a new search\nby right clicking the search bar on the GitLab docs site, and selecting the `Add a keyword for this search` menu option.\nUnfortunately, the above does not work for the handbook.\n\nTo add handbook search on Firefox, one can use the [Add custom search engine](https://addons.mozilla.org/hu/firefox/addon/add-custom-search-engine/) add-on (you can remove it after adding the engine). On Chrome, you can just add the engine under \nyour settings. To search the handbook, I use Google's site search functionality, and my search engine contains the following url: [https://www.google.com/search?q=site%3Ahttps%3A%2F%2Fabout.gitlab.com%2Fhandbook+%s](https://www.google.com/search?q=site%3Ahttps%3A%2F%2Fabout.gitlab.com%2Fhandbook+%s)\n\n## Quick actions made _really_ quick\n\nI often find myself repeating the same actions, such as adding the same labels to multiple issues or assigning issues to myself. When I want to apply a label, I have to manually type most of the label and autocompleting `~\"workflow::product validation\"` does not help much unfortunately. So I came up with a different solution.\n\nThere is a handy browser plugin that allows you to script around any webpage. It's called [TamperMonkey](https://www.tampermonkey.net/). I have created some *VeryQuickActions* using this plugin. \nDo you have a similar `Quick action` line in your GitLab input areas?\n\n![Quick Actions addon](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/tm-quickactions.png){: .shadow.medium}\n\nYou can get those nice links at the bottom of the filed by [adding its script](https://gitlab.com/gitlab-com/www-gitlab-com/-/snippets/1999778) to TamperMonkey.\n\nCustomizing these quick actions is quite easy and does not require advance programming skills.\nknowledge. You can open the above script (TamperMonkey) has a built-in editor for this.\n\nIf you would like to change the content of these quick links, you can use `TamperMonkey`'s built-in editor. 
Just look for the following lines:\n\n```js\n    const actions = [\n        ['/assign me', 'Mine'],\n        ['/label ~\"group::system\"', 'System label'],\n        ['/label ~\"workflow::problem validation\"', 'Problem label'],\n        ['/label ~\"workflow::solution validation\"', 'Solution label'],\n        ['/label ~\"workflow::validation backlog\"', 'Backlog label'],\n    ]\n```\n\nThese lines define the links that will be created. The first item in the lists show what will be included in the description or comment text on GitLab. The second item defines the text on the link.\nYou can use these as a guideline to create your own.\n\n*Note:* there is still a missing feature I would like to add to this script: I would like to make it easy to assign an issue\nto the previously viewed epic.\n\n## Filtering to-do's and checkboxes\n\nWhile I was on-boarding as a new GitLab team member, I ran a few scripts in the developer console to hide already checked checkboxes in a list and to dim the lines that did not contain my name. Since then, I have found myself needing similar functionality from time to time.\n\n![Filter checkboxes](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/tm-filter.png){: .shadow.medium}\n\nOn the above image. Which checkboxes are relevant to me?\n\nAgain, [the solution is a TamperMonkey script](https://gitlab.com/gitlab-com/www-gitlab-com/-/snippets/1999779). This script adds a small filter button\nbeside the GitLab search box. Filtering issues leaves (or excludes) only those checkboxes on the page that contain your search term.\nIf you want to exclude the search term, start your filter with an exclamation mark `!`.\n\n![Filter checkboxes](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/tm-filter2.png){: .shadow.medium}\n\n## What are your tips and tricks\n\nWe would love to hear your tips and tricks for using GitLab. 
Feel free to leave them in a comment below.",[9,723],{"slug":2535,"featured":6,"template":680},"gitlab-daily-tools","content:en-us:blog:gitlab-daily-tools.yml","Gitlab Daily Tools","en-us/blog/gitlab-daily-tools.yml","en-us/blog/gitlab-daily-tools",{"_path":2541,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2542,"content":2548,"config":2554,"_id":2556,"_type":14,"title":2557,"_source":16,"_file":2558,"_stem":2559,"_extension":19},"/en-us/blog/gitlab-dedicated-available",{"title":2543,"description":2544,"ogTitle":2543,"ogDescription":2544,"noIndex":6,"ogImage":2545,"ogUrl":2546,"ogSiteName":667,"ogType":668,"canonicalUrls":2546,"schema":2547},"GitLab Dedicated single-tenant SaaS now generally available","Achieve control and convenience with a fully managed DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663948/Blog/Hero%20Images/dedicatedcoverimage.png","https://about.gitlab.com/blog/gitlab-dedicated-available","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Dedicated single-tenant SaaS now generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Thomas\"}],\n        \"datePublished\": \"2023-06-15\",\n      }",{"title":2543,"description":2544,"authors":2549,"heroImage":2545,"date":2551,"body":2552,"category":675,"tags":2553},[2550],"Andrew Thomas","2023-06-15","\nLast year, we launched the [Limited Availability release of GitLab Dedicated](https://about.gitlab.com/blog/introducing-gitlab-dedicated/), a fully managed, single-tenant SaaS deployment of our comprehensive DevSecOps platform designed to address the needs of customers with stringent compliance requirements. Since then, we’ve worked closely with our Limited Availability customers, incorporating their feedback into targeted improvements and essential new features. 
\n\nWe are excited to share that [GitLab Dedicated is now generally available](https://about.gitlab.com/dedicated/), complete with compliance features such as the ability for customers to encrypt the data stored in their instance with their own encryption key.\n\nWith GitLab Dedicated, organizations can access all of the benefits of the DevSecOps platform – including faster releases, better security, and more productive developers – while satisfying compliance requirements such as data residency, isolation, and private networking.\n\nAccording to [GitLab’s 2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2023/), 44% of operations professionals said that their current role involves managing hardware and/or infrastructure “all of the time” or “most of the time.” GitLab Dedicated alleviates that burden, enabling organizations to focus on their core business model and meet their compliance needs without the overhead of managing a complex DevSecOps environment.\n\n## Flexibility and convenience\nOrganizations can achieve a lower total cost of ownership and quicker time to value with GitLab Dedicated, compared to hosting the platform themselves, while maintaining high operational standards.\n\n**A fully managed solution:** When software is not upgraded to the latest versions, organizations use obsolete and inefficient software that can be exposed to security threats. Because GitLab Dedicated is fully managed by GitLab, customers get access to the latest software features and security updates. \n\n**Data residency in the region of your choice:** Customers frequently ask us about data residency to meet stringent compliance requirements, which vary across different regions around the world. 
GitLab Dedicated can be deployed in [30+ regions](https://docs.gitlab.com/ee/subscriptions/gitlab_dedicated/) to meet these requirements.\n\n**High availability and scalability:** To meet the needs of large or rapidly scaling organizations, GitLab Dedicated uses a cloud native architecture that can support up to 50,000 users, with a disaster recovery plan and [availability targets](https://about.gitlab.com/handbook/engineering/infrastructure/team/gitlab-dedicated/slas/) to satisfy reliability needs. \n\n## Control and compliance\nThe need to have control over data and achieve compliance has never been greater. GitLab Dedicated offers data residency, tenant isolation, and private networking to help customers meet stringent compliance requirements. \n\n**Enterprise-grade security:** Customers require assurance that their data and access to their data is secure. GitLab Dedicated allows customers to implement necessary controls to protect their software delivery platform and meet compliance requirements. This includes access control using SAML-based authentication and authorization, secure communications with IP allow lists, private connectivity, and data encryption both at rest and in transit. \n\n**Full data and source code IP isolation:** As a single-tenant deployment, GitLab Dedicated helps to isolate data and source code from other tenants. Customers can also choose to encrypt the data stored in their instance with their own encryption key. \n\n**Full control over your data:** While GitLab fully manages the DevSecOps platform, customers have full control over the data it hosts, the region the data resides in, and securing the data themselves. 
Customers also retain full administrative access to the DevSecOps platform itself.\n\n## Looking ahead: AI and GitLab Dedicated\nGitLab Dedicated is a single-tenant deployment preferred by organizations with complex compliance requirements, so we plan to integrate AI into GitLab Dedicated without compromising on compliance requirements like data residency, isolation, and predictability. \n- In the near term, we will introduce AI features like Code Suggestions and Suggested Reviewers into GitLab Dedicated once they are made generally available.\n- In the long term, we will explore incorporating native AI capabilities such as training models to generate tailored insights and suggestions while keeping data private.\n\nTo learn more about what’s coming, follow the [GitLab Dedicated roadmap](https://about.gitlab.com/direction/saas-platforms/dedicated/#roadmap).\n\n## Learn more about GitLab Dedicated\nGitLab Dedicated includes all of the capabilities of GitLab Ultimate, with the added benefits of single-tenant architecture, regional data residency, and platform management by GitLab. With GitLab Dedicated, customers can realize operational efficiencies and deliver secure software faster. 
\n\nLearn more about [GitLab Dedicated](https://about.gitlab.com/dedicated/) today.\n",[1440,9,1342],{"slug":2555,"featured":6,"template":680},"gitlab-dedicated-available","content:en-us:blog:gitlab-dedicated-available.yml","Gitlab Dedicated Available","en-us/blog/gitlab-dedicated-available.yml","en-us/blog/gitlab-dedicated-available",{"_path":2561,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2562,"content":2567,"config":2573,"_id":2575,"_type":14,"title":2576,"_source":16,"_file":2577,"_stem":2578,"_extension":19},"/en-us/blog/gitlab-design-library",{"title":2563,"description":2564,"ogTitle":2563,"ogDescription":2564,"noIndex":6,"ogImage":1452,"ogUrl":2565,"ogSiteName":667,"ogType":668,"canonicalUrls":2565,"schema":2566},"Scaling design: The start of system thinking","How we began the process of introducing a design system to GitLab.","https://about.gitlab.com/blog/gitlab-design-library","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Scaling design: The start of system thinking\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taurie Davis\"}],\n        \"datePublished\": \"2017-12-12\",\n      }",{"title":2563,"description":2564,"authors":2568,"heroImage":1452,"date":2570,"body":2571,"category":743,"tags":2572},[2569],"Taurie Davis","2017-12-12","\n\nScaling design within an application is a struggle. Design systems help alleviate problems that arise with scaling by making it easier to find inconsistent interactions or conflicting messaging. However, it can be extremely difficult to introduce a new system to teams that are already functioning without one. 
Here's how we got started.\n\n\u003C!-- more -->\n\nWe took the initial step towards establishing our own system by creating a pattern library of reusable components that can be shared and reused across the application.\n\n## Design as a language\n\nConsistency within the UI and increased iteration speed are clear benefits for using a design library. This helps keep the application [DRY](http://programmer.97things.oreilly.com/wiki/index.php/Don't_Repeat_Yourself) and allows designers to focus their efforts on solving user needs, rather than recreating elements and reinventing solutions. In an effort to create a library that is understood by multiple teams, it's important to begin thinking about design as a language.\n\nYour design language is an integral part of a design system that clearly defines the semantics of your visual designs and allows your team to thoroughly document guidelines. It's important that the team not only understands how the system is built, but also the reasoning behind the choices made. This will ultimately help enable your team to build a library of components that support the semantics you have established.\n\n## Getting started\n\nKnowing where to start can be daunting. We began by first understanding the current state of our application. By auditing current designs that were implemented, we found numerous inconsistencies across our interface and determined that we lacked a solid design language to build from. A search within our variables revealed that we had **82 different gray values defined within the UI**. We also had an undefined type scale that included **at least 30 different values** in pixels, rems, and percentages.\n\nBy understanding the problems our current system had, we were able to start building a solid foundation to work from. 
We defined and documented our perceptual patterns which included styles that aid in the aesthetic of the brand: typography, icons, colors, and a measurement system.\n\n{: .text-center}\n![Library foundation](https://about.gitlab.com/images/blogimages/gitlab-design-library/library--styles@2x.png){: .shadow}\n\nOnce our perceptual patterns were defined, we started applying them to our components. We took a couple core pieces of our application and mocked them up using our new guidelines to ensure that our new rules were not too rigid and would be flexible enough to still encourage the creation of new ideas and methods while designing new components.\n\nOnce we nailed down our styles, we were able to start identifying functional patterns that needed to be built out using our new guidelines. Functional patterns include global modules that can be reused throughout your application, such as buttons, dropdowns, and tabs.\n\nThere were a few instances where our newly defined styles did not work well in our actual designs. For example, we determined that our 8px measurement system was too strict for right and left padding on horizontal tabs, buttons, and inputs. Although it was not a part of our measurement system, we decided as a team to create a new rule that would allow for a 12px measure in order better align stacked items while giving elements enough room to breathe.\n\n{: .text-center}\n![Library foundation](https://about.gitlab.com/images/blogimages/gitlab-design-library/library--measures@2x.png){: .shadow}\n\nBuilding out these components gave us the opportunity to alter and add to our new perceptual patterns. 
It is okay to allow some flexibility within your design library, so long as the rules and use cases are clearly defined.\n\n## Structure\n\nWe set up our design library using a [primary sketch file](https://gitlab.com/gitlab-org/gitlab-design/blob/master/production/resources/gitlab-elements.sketch) that includes all the components and styles that have been added to our team library. As we began building out multiple components, it was important to define a structure that would mimic the way components are implemented on the frontend. This would allow the design and frontend teams to work more closely together, ensuring that components were DRY and reusable. We chose to implement [Brad Frost's Atomic Design](http://bradfrost.com/blog/post/atomic-web-design/) principles in order to accomplish this. Atomic design \"break[s] entire interfaces down into fundamental building blocks,\" ensuring that everything is constructed in a methodical way. These building blocks consist of:\n\n**Atoms:** Elements that cannot be broken down further. This can include type styles, buttons, labels, and inputs\n\n**Molecules:** A group of atoms that function as a unit, such as a form.\n\n**Organisms:** A high-level component that consists of several molecules to make up its own structure. This can include a header or a sidebar.\n\nThere has been a lot written on Atomic Design. To learn more I recommend:\n\n- [Atomic Design by Brad Frost](http://atomicdesign.bradfrost.com/)\n- [Atomic Design by Brad Frost - An Event Apart video](https://vimeo.com/179245570)\n- [Pattern Lab](http://patternlab.io/)\n\nFollowing this structure forces the team to think carefully about what each part of a design is made up of, as well as easily define global components. 
If a modifier consists of atoms that are not used elsewhere, we encourage designers to think about whether a specific atom is necessary for that paradigm or if an existing global component would work in its place.\n\nIn the following example, we've built out our left navigational sidebar. This organism comprises molecules, and these molecules comprise globally used atoms (an avatar, badge, typography, and icons). We also include molecule modifiers, which make it easy to see the different states that a molecule can have. These together build the basis of the sidebar.\n\n{: .text-center}\n![Library foundation](https://about.gitlab.com/images/blogimages/gitlab-design-library/library--atomic@2x.png){: .shadow}\n\nWe use [symbols within Sketch](https://sketchapp.com/docs/symbols/) to create our atoms and molecules, while leaving organisms as groups so that we can easily modify and override specific aspects to fit the design we are working on.\n\n## Tooling\n\nChoosing tools can be an arduous task, especially with the number of options available for designers today. It is easy to get caught up in the latest tool and turn progress into tool churn. At GitLab, we took the time to evaluate multiple tools that would assist in the creation of a team library.\n\nSome of the issues we ran into while evaluating plugins were:\n\n- Slow performance, as well as bugs, when adding, changing, and renaming components\n- Overriding options when adding symbols to a new document were not pulled in or included automatically\n- Text styles weren't being saved or included in symbols that were pulled into a new document\n\nWe eventually decided to move forward using [Brand.ai](https://brand.ai) as a plugin for Sketch. This plugin solved many of the issues we were running into with other tools. However, while this plugin was the best that we found at the time, no tool is perfect:\n\n- Brand. 
ai limits the organization of components to one level deep\n- While faster and less buggy than other plugins, Brand.ai is still not as fast as we would like :rocket:\n\n{: .text-center}\n![Library foundation](https://about.gitlab.com/images/blogimages/gitlab-design-library/library--brandai@2x.png){: .shadow}\n\nAt GitLab, we don't look at Brand.ai as the answer. It is solely a tool to help aid us in the creation process. Since deciding on using Brand.ai, Sketch has released their own [library feature](https://blog.sketchapp.com/libraries-an-in-depth-look-56b147022e1f), Brand.ai was [acquired by InVision](https://www.invisionapp.com/blog/announcing-invision-design-system-manager/), and Figma has added numerous new features to aid in the creation of a design library. Tools are constantly transforming, but it's important to keep in mind that constantly changing tools may slow progress. Evaluate your tools carefully and decide what is best for your team at this moment. Remember that pattern libraries are only one aspect of a design system that helps make it more effective. The tools and technologies you use to create the library are meant to help your team, not act as the solution.\n\n## Moving forward\n\nConversations around design systems have exploded in recent years. Just over the last few months, Figma has begun sponsoring [Design System Dinners](https://www.designsystems.com/), InVision has created a [Design Systems Handbook](https://www.designbetter.co/design-systems-handbook/introducing-design-systems), and Smashing Magazine released [*Design Systems*](https://www.smashingmagazine.com/design-systems-book/) as their newest book.\n\nAt GitLab, we have only just begun the work on our design system. A design library is only the first part of our overall goal and it is our first step towards ensuring that our design will scale within the growing organization. 
We have begun thinking about design with a system in mind by creating a design language that captures the visual styles of our brand, as well as creating reusable and robust components. We've chosen tools and technologies that help aid us in this process while remembering that they are always evolving and are not the system itself.\n\nBeyond continuing to build out new paradigms within our design library, our next step is to begin linking our design library with our frontend code. This will allow us to include not only our designs and documentation, but also code snippets that can be used and referenced in our application. We have only just started this process and are in the very early stages of setting up a [repository](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com) to showcase our system.\n\nIf you have any tips, tricks, or lessons that you discovered while building out your own design library or system, we would love to hear from you!\n\n## Resources\n\n- [gitlab-elements.sketch](https://gitlab.com/gitlab-org/gitlab-design/blob/master/production/resources/gitlab-elements.sketch)\n- [GitLab Brand.ai](https://brand.ai/git-lab/primary-brand)\n- [Design Repo](https://gitlab.com/gitlab-org/gitlab-design)\n",[1698,700,9],{"slug":2574,"featured":6,"template":680},"gitlab-design-library","content:en-us:blog:gitlab-design-library.yml","Gitlab Design Library","en-us/blog/gitlab-design-library.yml","en-us/blog/gitlab-design-library",{"_path":2580,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2581,"content":2587,"config":2592,"_id":2594,"_type":14,"title":2595,"_source":16,"_file":2596,"_stem":2597,"_extension":19},"/en-us/blog/gitlab-employees-on-working-at-gitlab",{"title":2582,"description":2583,"ogTitle":2582,"ogDescription":2583,"noIndex":6,"ogImage":2584,"ogUrl":2585,"ogSiteName":667,"ogType":668,"canonicalUrls":2585,"schema":2586},"What GitLab employees like about working at GitLab","We're often asked about what it's like to work at GitLab. 
Every GitLab team member answers this question a little differently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679222/Blog/Hero%20Images/2015_amsterdam_team.jpg","https://about.gitlab.com/blog/gitlab-employees-on-working-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What GitLab employees like about working at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2016-04-26\",\n      }",{"title":2582,"description":2583,"authors":2588,"heroImage":2584,"date":2589,"body":2590,"category":808,"tags":2591},[950],"2016-04-26","\n\nWe're often asked about what it's like to work at GitLab. Every GitLab team\nmember answers this question a little differently. But there were some\nnoticeable themes across each of their answers. We've highlighted some of\nthose key themes that making working at GitLab great.\n\n\u003C!--more-->\n\n## Gathering Feedback from our Team\n\nWe pay close attention to what our employees like and don’t like about\nworking at GitLab. To get insight from the team, we send out an anonymous\nsurvey to all GitLab team-members on a regular basis. The goal of the survey\nis to facilitate an open environment for people to share their thoughts,\nask questions, or raise potential concerns. Feedback, even if it is only\nmentioned by one employee, is acknowledged and addressed. 
Most often, the\nfeedback captured in the survey is addressed in our [Team Call](/handbook/communication/#team-call).\n\n## Five key themes\n\nFive key themes consistently came out as we got feedback from the team.\n\n**The people.** Our team members have described each other with words like talented,\ncaring, approachable, honest, frank, smart, brilliant, and skilled.\n\n**The product.** One of our team members summed it up nicely when she said,\n“It’s great working on a product that I actually love to use.”\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">I must say, working in the open full time on OSS (in Ruby even!) \u003Ca href=\"https://twitter.com/gitlab\">@gitlab\u003C/a> is a very nice and welcome change. Great team and product.\u003C/p>&mdash; Josh Frye (@joshfng) \u003Ca href=\"https://twitter.com/joshfng/status/687994632454672385\">January 15, 2016\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"//platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n**Open source values.** Being involved in an open source project is motivating,\nbecause it’s not just a category, it’s a philosophy. Or to put it in the words of\nan anonymous team member, “working on open-source while getting paid for it is a dream job!”\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Things that I love about working at \u003Ca href=\"https://twitter.com/gitlab\">@gitlab\u003C/a>: Getting in touch with the OSS community. Awesome experience so far.\u003C/p>&mdash; A. Felipe Cabargas (@juanpintoduran) \u003Ca href=\"https://twitter.com/juanpintoduran/status/713573732829249536\">March 26, 2016\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"//platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n**Freedom.** On this theme, it's important to note that GitLab is a remote-only company,\nmeaning our employees are dispersed across the world. 
A lot of personal independence comes\nwith working fully remote, and the people who are drawn to GitLab tend to appreciate that.\nHere's a glimpse into our remote-only work culture.\n\n\u003Ciframe width=\"854\" height=\"480\" src=\"https://www.youtube.com/embed/NoFLJLJ7abE\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\nAnd last, but certainly not least, the opportunity.\n\n**Opportunity.** There is a great opportunity to learn from the people around you.\nGitLab has a large community of teachers and learners helping each other.\nSome of these people exist within our company and others are people who contribute their\nideas and knowledge to GitLab. Additionally, the fact that the company is growing presents\nan opportunity for team members to expand their skillset into new areas or grow to take\non a leadership role in their existing functional expertise. One GitLab team-member\nsaid, \"you have the ability to take on multiple hats and responsibilities, [and] everyone\nand everything is open to constant improvement.”\n\nHere’s a [more detailed presentation](https://docs.google.com/presentation/d/1h9P8Vf_6fzPbLCCahvwtIF5j_cH54zsv9iRSseVZzl0/edit#slide=id.gd443388ea_2_173) on what our team likes and dislikes, including what\nthe challenges and problems are, and how we’re dealing with them. We’re proud of the fact that\nour employees are pretty happy and we're committed to maintaining that! Perhaps, we'll\neven see more GitLab team-members setting up branded home offices\nlike our Service Engineer, [Drew Blessing](https://twitter.com/drewblessing).\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">.\u003Ca href=\"https://twitter.com/gitlab\">@GitLab\u003C/a> is hiring! 
Work wherever you&#39;re most comfortable...even your own GitLab-branded home office \u003Ca href=\"https://twitter.com/hashtag/remotework?src=hash\">#remotework\u003C/a> \u003Ca href=\"https://t.co/VMEhBui0Yh\">pic.twitter.com/VMEhBui0Yh\u003C/a>\u003C/p>&mdash; Drew Blessing (@drewblessing) \u003Ca href=\"https://twitter.com/drewblessing/status/697510602965553156\">February 10, 2016\u003C/a>\u003C/blockquote>\u003Cscript async src=\"//platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nHave a question or a comment? As always, [give us a shout.](https://twitter.com/gitlab)\n",[832,9],{"slug":2593,"featured":6,"template":680},"gitlab-employees-on-working-at-gitlab","content:en-us:blog:gitlab-employees-on-working-at-gitlab.yml","Gitlab Employees On Working At Gitlab","en-us/blog/gitlab-employees-on-working-at-gitlab.yml","en-us/blog/gitlab-employees-on-working-at-gitlab",{"_path":2599,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2600,"content":2606,"config":2611,"_id":2613,"_type":14,"title":2614,"_source":16,"_file":2615,"_stem":2616,"_extension":19},"/en-us/blog/gitlab-empowers-minorities-in-tech-with-erg",{"title":2601,"description":2602,"ogTitle":2601,"ogDescription":2602,"noIndex":6,"ogImage":2603,"ogUrl":2604,"ogSiteName":667,"ogType":668,"canonicalUrls":2604,"schema":2605},"We're working to empower Minorities in Tech with a new employee resource group","People of color are more likely than any other group to voluntarily leave their jobs in tech. 
Employee resource groups, mentorship opportunities, and allyship can create a more inclusive workplace.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681369/Blog/Hero%20Images/dib-mit-2.png","https://about.gitlab.com/blog/gitlab-empowers-minorities-in-tech-with-erg","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're working to empower Minorities in Tech with a new employee resource group\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-06-18\",\n      }",{"title":2601,"description":2602,"authors":2607,"heroImage":2603,"date":2608,"body":2609,"category":299,"tags":2610},[672],"2020-06-18","\n\n_This is the final part of our three-part series on diversity, inclusion and belonging. [Part one](/blog/our-journey-to-a-diverse-and-inclusive-workplace/) focuses on GitLab's diversity goals and efforts to date. [Part two](/blog/what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry/) examines some of the challenges with diversity, inclusion, and belonging in the tech industry as a whole, and shares some recommendations on how to overcome them._\n\nPeople of color are more likely than any other group to voluntarily leave their jobs in the tech industry due to persistent unfairness in the workplace – whether that is due to stereotyping and discrimination, or because opportunities for career advancement are lacking.\n\nIn the [Tech Leavers Study](https://www.kaporcenter.org/wp-content/uploads/2017/08/TechLeavers2017.pdf) by the Kapor Center for Social Impact, one-quarter of men and women from underrepresented groups reported experiences of stereotyping in the workplace.\n\n“One of the discriminations that I’ve had to deal with outright is my size and my skin color,\" says [Sharif Bennett](/company/team/#SharifATL), mid-market account executive at GitLab. 
“I’m about 6'3 and some would relay back to me that people that had a conversation about me and one of the things that were said about me was that I was intimidating and scary.\"\n\nThe persistence of stereotyping and discrimination in the tech workplace is just one reason why [employee resource groups (TMRGs)](https://medium.com/@sarah.cordivano/employee-resource-groups-part-1-b684aa249420), such as [GitLab’s Minorities in Tech (MIT)](/company/culture/inclusion/erg-minorities-in-tech/) TMRG, are so important. TMRGs allow team members to create a safe space for cultural exchange, connection, and opportunities for growth and mentorship. Beyond the benefits of creating a more inclusive workplace, TMRGs can help support diversity initiatives, such as offering community insight into culturally sensitive marketing campaigns or recruiting and advancing top talent from diverse talent pools.\n\n## Inside the MIT TMRG\n\nIn fall 2019, [Candace Byrdsong Williams](/company/team/#cwilliams3), diversity and inclusion partner, and GitLab team members launched four TMRGs: Pride, Women, DiversABILITY, Minorities in Tech (MIT), and the Gender Minorities TMRG is new and forthcoming.\n\nOne of the TMRGs that has come the furthest since the launch is the MIT TMRG, led by Sharif, and [Aricka Flowers](/company/team/#atflowers), manager, Digital Production at GitLab. MIT aims to create a safe space for underrepresented minorities to communicate, unite, and share experiences while working at GitLab, but participation is open to allies as well.\n\n> \"With the MIT TMRG, we aim to create a safe space for minority-identifying GitLab team members and allies to come together and have a measurable impact on how our company not only increases the diversity within our workforce but also retains and supports those who are from underrepresented communities,\" says Aricka, co-lead of the [DIB Advisory Group](/company/culture/inclusion/influencer-group-guide/). 
“As we launch into our first full year of operation, we have identified four priorities for the MIT TMRG and are piloting a mentorship program that gives members direct access to top leadership and enables increased support and visibility for, and communication from, diverse voices within our organization.\"\n\nMIT has four major initiatives for 2020:\n\n*   Recruit more underrepresented minorities to different roles at GitLab\n*   Participate in external events (now virtually) with community partners\n*   Internal outreach to encourage more participation from underrepresented minorities and allies at GitLab (e.g., create an internal newsletter)\n*   [Launch of the MIT Mentorship Program](/company/culture/inclusion/erg-minorities-in-tech/mentoring/), which connects underrepresented minorities with mentors who could range from people managers to senior leadership\n\n“We’ve all been at organizations where we’ve felt there has been a lack of mentorship for people of color and minorities,\" says Sharif, who along with [Darva Satcher](/company/team/#engineering-manager-create-ds), backend engineering manager Create:Knowledge and Create: Editor at GitLab, is leading the mentorship initiative.\n\n“Mentoring generally happens with executives and most of the time they mentor people who remind them of themselves,\" says Sharif. \"We wanted to develop a program that was geared toward people of color and members of the group to gain exposure as to what goes on at the executive level and c-suite executive level.\"\n\n## MIT TMRG launches mentorship program\n\nToday, GitLab has no Black or Latinx folks in leadership positions. The lack of representation for people of color at the leadership level at GitLab is a significant problem, and we are committed to remedying this by hiring or promoting a Black leader to the director level or above by 2021. 
[Read part one of our blog series](/blog/our-journey-to-a-diverse-and-inclusive-workplace/) to learn more about our DIB strategy.\n\nOne of the best ways to make GitLab, and the tech leadership landscape at-large, less white and male is to create opportunities for advancement specifically for people of color. Creating opportunities for growth for underrepresented minorities through mentorship with executive and senior leadership is among the most valuable diversity initiatives a company can launch, according to the [Harvard Business Review (HBR)](https://hbr.org/cover-story/2019/11/toward-a-racially-just-workplace).\n\nThe MIT Mentorship Program pilot program kicked off in June 2020, and has matched 20 underrepresented minorities at GitLab with managers and executives who have taken a similar career path. The mentee-mentor pair will identify a problem at GitLab and will work together on a project to address this gap. For example, [Romer Gonzalez](/company/team/#romerg), mid-market account executive for Latin America, at GitLab and co-lead for the DIB Advisory Group, will be working with [David Hong](/company/team/#dhong), VP of field operations at GitLab, to expand GitLab’s footprint in Latin America.\n\nThe program lasts from late June to early August 2020, and ends in a [virtual showcase](/company/culture/inclusion/erg-minorities-in-tech/mentoring/program-structure/#what-will-be-presented-in-the-showcase) in September.\n\n## We need allies in leadership\n\nWe have encouraged leaders to set the tone by creating a safe space to talk about the everyday experiences of bias, discrimination, and microaggressions that so many underrepresented minorities face in the workplace. During the DIB roundtable discussion during GitLab's company-wide meeting known as Contribute (virtual for 2020), executive leaders engaged MIT TMRG members in an honest conversation about discrimination and microaggressions in the workplace. 
By creating a space for candid discussion about a painful and personal topic, our leaders are taking an important step in cultivating a more inclusive work culture.\n\nSharif points out that it cannot be the responsibility of underrepresented minorities in MIT or the other TMRGs to launch diversity initiatives – allies need to join the conversation and use their privilege to advance these initiatives.\n\n“Big wins will come from interrogating seemingly mundane practices and processes, and holding managers and leaders accountable for progress toward your organization’s aspirations,\" [Dr. Melissa Thomas-Hunt](https://www.linkedin.com/in/melissa-thomas-hunt-2843196/) of Airbnb told [HBR](https://hbr.org/2019/11/the-day-to-day-work-of-diversity-and-inclusion).\n\nThe good news is, we continue to make progress at GitLab because we have made DIB a strong priority for our company.\n\n## DIB is an opportunity for GitLab\n\nBuilding a diverse and inclusive work culture where everyone feels a sense of belonging can feel daunting, but it really depends upon your mindset, says Candace. The business case for hiring from a diverse talent pool and creating growth opportunities for underrepresented minorities is clear, but there is also [intrinsic value that comes from creating an inclusive company](https://hbr.org/2018/07/the-other-diversity-dividend). By perceiving DIB as an opportunity and solution, as opposed to a problem, we can go much further.\n\n\"In tech, we’re always taught to ask what is the problem we’re trying to solve and so I would always advise fellow companies that diversity, inclusion, and belonging isn’t a problem to solve. 
It’s an opportunity and it’s something we should leverage,\" says Candace.\n\n_A sincere thank you to Candace, Aricka, Sharif, and the whole Minorities in Tech (MIT) team for sharing valuable insights to help make this blog series possible, and thank you to our colleagues who are committed to making GitLab a more inclusive workplace._\n\n_[Read part one of our blog series](/blog/our-journey-to-a-diverse-and-inclusive-workplace/) to learn more about our journey and strategy to accelerating progress in DIB at GitLab, and [part two to unpack some of the diversity challenges of the tech industry](/blog/what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry/), and how different companies are prioritizing DIB initiatives._\n",[9],{"slug":2612,"featured":6,"template":680},"gitlab-empowers-minorities-in-tech-with-erg","content:en-us:blog:gitlab-empowers-minorities-in-tech-with-erg.yml","Gitlab Empowers Minorities In Tech With Erg","en-us/blog/gitlab-empowers-minorities-in-tech-with-erg.yml","en-us/blog/gitlab-empowers-minorities-in-tech-with-erg",{"_path":2618,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2619,"content":2625,"config":2632,"_id":2634,"_type":14,"title":2635,"_source":16,"_file":2636,"_stem":2637,"_extension":19},"/en-us/blog/gitlab-first-esg-and-dib-reports",{"title":2620,"description":2621,"ogTitle":2620,"ogDescription":2621,"noIndex":6,"ogImage":2622,"ogUrl":2623,"ogSiteName":667,"ogType":668,"canonicalUrls":2623,"schema":2624},"GitLab’s first ESG and DIB reports: Here’s what to know","Learn why Environmental, Social, and Governance and Diversity, Inclusion, and Belonging are integral to GitLab’s business and culture.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669919/Blog/Hero%20Images/gitlabbasic.png","https://about.gitlab.com/blog/gitlab-first-esg-and-dib-reports","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s first 
ESG and DIB reports: Here’s what to know\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sherida McMullan\"},{\"@type\":\"Person\",\"name\":\"Stacy Cline\"}],\n        \"datePublished\": \"2023-07-26\",\n      }",{"title":2620,"description":2621,"authors":2626,"heroImage":2622,"date":2629,"body":2630,"category":675,"tags":2631},[2627,2628],"Sherida McMullan","Stacy Cline","2023-07-26","\nEnvironmental, Social, and Governance (ESG) and Diversity, Inclusion, and Belonging (DIB) are at the center of many conversations right now, but at GitLab, these are two important issues that have been integral parts of GitLab’s business and culture, even before we explicitly used the term “ESG.” \n\nHow we think about this work shows up in the management and oversight of our business. It’s evident in how we approach remote work and develop our products, and our approach aligns with our [values](https://handbook.gitlab.com/handbook/values/), one of which is [DIB](https://handbook.gitlab.com/handbook/values/#diversity-inclusion). \n\nWhile we’ve always woven ESG practices into our business, we heard from you, our customers, our investors, our community members, and our team members, that there was more that we could do. That’s why we published our first ESG and DIB reports. With our transparency value in mind, we felt it was key to bring all of these important conversations into one forum, share our progress, and our commitment to continue iterating and to take action on the key topics we and our stakeholders consider most important.\n\nEarlier this year, we conducted our first double materiality assessment. During this process, we spoke with our stakeholders to understand where they want to see GitLab focus our ESG efforts and where we have the potential to have the greatest impact on the environment, society, and our global communities.\n\nSix key topics rose to the top. \n\n## 1. 
Diversity, Inclusion, and Belonging \nDIB is fundamental to the success of GitLab and, as such, is one of our core values. We incorporate the DIB value into all that we do. As a global company, we strive for a team that is representative of our users. We aim to create a work environment that is transparent in nature and fosters a space in which everyone is welcomed. We’ve made great strides in our aspirational DIB goals but we’re not stopping here. Here are a few highlights:\n\n* We increased [underrepresented group](https://about.gitlab.com/company/culture/inclusion/#examples-of-select-underrepresented-groups) representation across all job grades, exceeding our CTO and CEO aspirational quarterly goals focused on URG management and senior leadership,\n* We reached 37% for women in senior leadership roles, exceeding our aspirational goal by 7%.\n* We established three new Team Member Resource Groups focused on inclusion and belonging: Caregiving, Global Voices, and Black at GitLab.\n\nCheck out our [DIB report](http://about.gitlab.com/diversity-inclusion-belonging) for all of the latest details.\n\n## 2. Greenhouse gas emissions \nPart of doing responsible business means minimizing our environmental footprint. GitLab is a fully remote company without direct emissions from company-owned facilities or direct energy consumption. Accordingly, our greenhouse gas (GHG) inventory measures Scope 3 emissions only, specifically the emissions associated with remote work, purchased goods and services, cloud services, and business travel. We will use the results of the inventory to better understand our key sources of emissions, set reduction goals using fiscal year 2023 as a baseline, develop a reduction plan, and educate our fully remote team on how they can understand and reduce their GHG emissions at home.\n\n## 3. Talent and engagement \nWe're a team of helpful, passionate people who want to see each other, GitLab, and the broader GitLab community succeed. 
We care about what our team members achieve: the code shipped, the user that was made happy, and the team member that was helped. One way we measure engagement is through an annual survey and in fiscal year 2023, we achieved an 82% participation rate and an overall ‘favorable’ engagement score of 81%. The results from our survey will help drive our talent strategy. \n\n## 4. Information security and data privacy \nAt GitLab, we know how much security and privacy matter to our customers and stakeholders. We maintain a formal [Security Assurance](https://about.gitlab.com/handbook/security/security-assurance/) department responsible for monitoring and reporting on GitLab's compliance with various security frameworks and standards. In fiscal year 2023, we received our ISO 27001 certification to include the ISO 27017:2015 cloud security standard and ISO 27018:2019 privacy standard. For more information on our approach to information security and data privacy, please visit our [Trust Center](https://about.gitlab.com/security/).\n\n## 5. Responsible product development\nGitLab's [product mission](https://esg-landing-page.about.gitlab-review.app/handbook/product-development-flow/) is to consistently create products and experiences that users love and value. Responsible product development is integral to this mission. We are committed to secure and ethical operations as an organization and, beyond that, strive to set an example by empowering our wider GitLab community to build and work with the highest levels of security through our DevSecOps platform. \n\n## 6. Business ethics \nGitLab is committed to the highest standards of legal and ethical business conduct and has long operated its business consistent with written operating principles and policies that reinforce this commitment. Compliance with GitLab’s policies and local and federal rules and laws is the individual responsibility of each team member. 
Team members are also required to deal honestly, ethically, and fairly with customers, partners, suppliers, competitors, and other third parties.\n\nWhile we’re excited to share our key programs, policies, and accomplishments in the ESG and DIB reports, we know that the work doesn’t stop here. We’re looking forward to investing more in this space and updating you, our stakeholders, along the way. \n\n## Read the ESG and DIB reports\n* [ESG report](https://about.gitlab.com/environmental-social-governance)\n* [DIB report](https://about.gitlab.com/diversity-inclusion-belonging/)\n",[675,9],{"slug":2633,"featured":6,"template":680},"gitlab-first-esg-and-dib-reports","content:en-us:blog:gitlab-first-esg-and-dib-reports.yml","Gitlab First Esg And Dib Reports","en-us/blog/gitlab-first-esg-and-dib-reports.yml","en-us/blog/gitlab-first-esg-and-dib-reports",{"_path":2639,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2640,"content":2646,"config":2651,"_id":2653,"_type":14,"title":2654,"_source":16,"_file":2655,"_stem":2656,"_extension":19},"/en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative",{"title":2641,"description":2642,"ogTitle":2641,"ogDescription":2642,"noIndex":6,"ogImage":2643,"ogUrl":2644,"ogSiteName":667,"ogType":668,"canonicalUrls":2644,"schema":2645},"How to use GitLab for Agile, CI/CD, GitOps, and more","Read our example engineering stories from the past two years that show how to use GitLab for you DevOps cycle, including GitOps, CI/CD and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681825/Blog/Hero%20Images/triangle_geo.jpg","https://about.gitlab.com/blog/gitlab-for-cicd-agile-gitops-cloudnative","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab for Agile, CI/CD, GitOps, and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-12-17\",\n      
}",{"title":2641,"description":2642,"authors":2647,"heroImage":2643,"date":2648,"body":2649,"category":743,"tags":2650},[672],"2020-12-17","\n\nOn this blog, our community frequently shares tips, tricks, stories, and tutorials that demonstrate how to do different things with GitLab. This collection features some of our most popular and enduring how-to blog posts from the past two years, covering [CICD](/topics/ci-cd/), GitOps, Machine learning and more! See how various team members, companies, and users leverage GitLab to deliver software faster and more efficiently by reading and watching some of the tutorials we've featured.\n\n## Code review with GitLab\n\nWe know that code review is essential to effective collaboration, but the logistics of it all can be challenging. [Master code review by watching the demo](/blog/demo-mastering-code-review-with-gitlab/) included with this blog post.\n\n## Cool ways to use GitLab CI/CD\n\n### The basics of CI/CD\n\nBrand new to CI/CD? Read our [beginner's guide to the vocabulary and concepts](/blog/beginner-guide-ci-cd/).\n\nHere’s the [code you’ll need to build a CI/CD pipeline](/blog/how-to-create-ci-cd-pipeline-with-autodeploy-to-kubernetes-using-gitlab-and-helm/) with AutoDeploy to Kubernetes, using GitLab and Helm.\n\nNext, find the [code you'll need to build a CI pipeline with GitLab](/blog/basics-of-gitlab-ci-updated/), allowing you to run jobs sequentially, in parallel, or out of order.\n\n### Pipelines with CI/CD\n\nLearn how to [build a CI/CD pipeline in 20 minutes (or less) using GitLab’s AutoDevOps](/blog/building-a-cicd-pipeline-in-20-mins/) capabilities by following the instructions in this blog post, which is based on a popular GitLab Commit Brooklyn presentation that you can watch below.\n\nDiscover [how to trigger pipelines across multiple projects](/blog/cross-project-pipeline/) using GitLab CI/CD.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe 
src=\"https://www.youtube-nocookie.com/embed/-shvwiBwFVI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### CI/CD with Android\n\nAndroid project users are in luck because in [this post we explain how to set up GitLab continuous integration (CI) functions](/blog/setting-up-gitlab-ci-for-android-projects/) in Android projects.\n\nGitLab and fastlane pair up to [help users publish applications to the iOS store](/blog/ios-publishing-with-gitlab-and-fastlane/) using a GitLab CI/CD runner.\n\n### CI/CD and GKE\n\n![GitLab CI/CD and GKE integration](https://about.gitlab.com/images/blogimages/gitlab-gke-integration-cover.png){: .shadow.medium.center}\n\nWe explain [how to get started with GitLab CI/CD and Google Kubernetes Engine (GKE)](/blog/getting-started-gitlab-ci-gcp/) in this initial demo.\n\nGitLab self-managed user? ✅\nUsing Google Kubernetes engine? ✅\nGreat! The [next tutorial is all about how to use GitLab CI to install GitLab runners on GKE](/blog/gitlab-ci-on-google-kubernetes-engine/) using our integration. It shouldn’t take you more than 15 minutes.\n\n## GitLab for machine learning\n\nBut what about GitLab for machine learning? We’ve got you covered. 
Watch the demo from GitLab Virtual Commit to see how you can use GitLab to leverage tasks for machine learning pipelines.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/DJbQJDXmjew\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab for Agile\n\nGitLab features work for many software development methodologies, including [Agile](/solutions/agile-delivery/).\n\nStart by [mapping Agile artifacts to GitLab features](/blog/gitlab-for-agile-software-development/) and explore how iteration works using GitLab.\n\n![GitLab issue board](https://about.gitlab.com/images/blogimages/issue-board.png){: .shadow.medium.left}\n\nThe GitLab issue board allows for flexible workflows and can be organized to represent [Agile software development](/topics/agile-delivery/) states.\n{: .note.text-center}\n\nThen go more in-depth to learn [how to use GitLab for Agile portfolio planning and project management](/blog/gitlab-for-agile-portfolio-planning-project-management/).\n\n## Giddy for GitOps?\n\n[GitOps](/topics/gitops/) takes DevOps best practices that are used for application development such as [version control](/topics/version-control/), collaboration, compliance, and CI/CD, and applies them to infrastructure automation.\n\nGitLab is the [DevOps platform](/topics/devops/) that does it all, and it’s built using Git, making it the ideal solution for GitOps processes.\n\nFirst, we explained [how GitLab and Ansible can be used together for GitOps](/blog/using-ansible-and-gitlab-as-infrastructure-for-code/) processes and [infrastructure as code](/topics/gitops/infrastructure-as-code/). 
In a follow-up post, we explain how [GitLab can also be paired with Terraform for GitOps](/topics/gitops/gitlab-enables-infrastructure-as-code/) and IaC.\n\nThe video on how to use Ansible and GitLab together has been viewed more than 13,000 times since it was first created in 2019, and is embedded for you below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/M-SgRTKSeOg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Visibility\n\nOne of our principles at GitLab is to [dogfood everything](/handbook/engineering/development/principles#dogfooding), so you can rest assured that we aren’t about to introduce an engineering feature without first trying it out for ourselves. When it comes to our Insights tool though, the process happened in reverse. Our Engineering Productivity team at GitLab needed a particular tool, and as we built it, we realized it would benefit our GitLab Ultimate customers. Read on to [learn how our Insights tool came to be](/blog/insights/).\n\nDig into this [valuable explanation of how we discovered that Prometheus query language can be used to detect anomalies](/blog/anomaly-detection-using-prometheus/) in the time-series data that GitLab.com reports.\n\n## In the clouds\n\nWatch the demo to learn how GitLab runner and RedHat OpenShift can work together to jump start your application development and deployment to the cloud.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/yGWiQwrWimk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd finally, although Docker Hub may be enforcing new rate limits, there's no need to panic. 
We [explain how to build a monitoring plug-in](/blog/docker-hub-rate-limit-monitoring/) to help you monitor the number of pull requests.\n\nCan you think of some other stand-out blog posts or demos that we should include here? Drop the link in a comment below.\n\nCover image by [Chris Robert](https://unsplash.com/@chris_robert) on [Unsplash](https://unsplash.com/photos/kY-uPDLXxHg)\n{: .note}\n",[1090,1293,9,993],{"slug":2652,"featured":6,"template":680},"gitlab-for-cicd-agile-gitops-cloudnative","content:en-us:blog:gitlab-for-cicd-agile-gitops-cloudnative.yml","Gitlab For Cicd Agile Gitops Cloudnative","en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative.yml","en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative",{"_path":2658,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2659,"content":2665,"config":2671,"_id":2673,"_type":14,"title":2674,"_source":16,"_file":2675,"_stem":2676,"_extension":19},"/en-us/blog/gitlab-for-designers",{"title":2660,"description":2661,"ogTitle":2660,"ogDescription":2661,"noIndex":6,"ogImage":2662,"ogUrl":2663,"ogSiteName":667,"ogType":668,"canonicalUrls":2663,"schema":2664},"Help us shape the future of design discussion in GitLab","We've identified the need for full integration of user experience design within the DevOps lifecycle, and would love your feedback on how to make that happen.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680008/Blog/Hero%20Images/design-discussion.jpg","https://about.gitlab.com/blog/gitlab-for-designers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Help us shape the future of design discussion in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarrah Vesselov\"}],\n        \"datePublished\": \"2018-11-08\",\n      }",{"title":2660,"description":2661,"authors":2666,"heroImage":2662,"date":2668,"body":2669,"category":299,"tags":2670},[2667],"Sarrah Vesselov","2018-11-08","\n\nAt 
GitLab, we do everything using, well, GitLab. Using our product as part of our workflow allows us to experience, firsthand, the limitations and frustrations that may prevent our users (and us) from being able to get work done quickly and efficiently. In the user experience (UX) department, we've found ourselves struggling with some important aspects of our day-to-day work – this is what we've found, and how we hope to address it:\n\n## Design discussions quickly become hard to follow\n\nDesign discussion happens inside of issues at GitLab. Typically, a designer will post a wireframe, mockup, or prototype within a comment on an issue to elicit feedback from others. The transparency is excellent: product managers, engineers, and designers can all come together to talk over the problem and the possible solutions. Problems creep in when conversations get too lengthy, hard to follow, and involve multiple iterations of a design. How can we make design discussion at GitLab more useful and accessible?\n\n## We need version control for design files\n\nWe use Sketch for our day-to-day design work. The UX department's Sketch files live within a [design repository](https://gitlab.com/gitlab-org/gitlab-design) to ensure that all designers have access to current patterns and solutions. However these files are not version controlled within the repository, so designers keep personal folders for work-in-progress designs. How can we version control our files within GitLab and eliminate the need to keep multiple versions of a particular design?\n\n## A competitive analysis of design platforms and applications\n\nTo start looking for solutions to these problems, we conducted a competitive analysis of the other platforms and applications out there tackling design creation, collaboration, and handoff. We wanted to know: What are other design teams doing to address these problems? Are there existing aspects of GitLab we can leverage to solve these problems? 
If not, what would an [MVC](/handbook/product/product-principles/#the-minimal-viable-change-mvc) look like to integrate designers more efficiently into GitLab?\n\n### Summary of findings\n\nToday's average user is tech savvy, with high expectations for interface usability. Products must be useful and easy to use for users with a wide range of backgrounds, experiences, and expectations. As a result, enterprise-level companies have invested heavily in building UX teams to produce beneficial experiences. These UX teams have distinct requirements for the toolsets they use. Design tools must be able to:\n\n* Improve UX consistency\n* Enable research and testing of designs with users\n* Clarify requirements\n* Facilitate collaboration between teams (Engineering, PM, UX)\n* Version control design files\n* Minimize duplication of work with an SSOT\n* Minimize context switching\n\nThe last requirement, minimize context switching, really stood out. Enterprise designers work on a variety of platforms. The market has exploded over the past decade, with a majority of designers moving from using desktop software to cloud-based platforms. Designers want and need a single-platform approach. They must have the ability to design, collaborate, and share their work with the rest of the organization within one platform.\n\nThis single-platform approach presents a unique opportunity for us. GitLab is the first single application built from the ground up for all stages of the DevOps lifecycle for Product, Development, QA, Security, and Operations teams to work concurrently on the same project. A significant missing piece of this lifecycle is UX design.\n\n### Areas of opportunity for GitLab:\n\n* Review and collaboration\n* Interaction design\n* Version control\n* Developer handoff\n* Design system management\n\nThe total market potential is over US $4 billion and growing. 
With no clear winners in the design tool space, there is a significant opportunity for an application that can successfully engage developers and design teams in the DevOps lifecycle.\n\nYou can view the [complete competitive analysis here](https://docs.google.com/document/d/12o6h6Fm7bAjhW5AK1r-PNhvn0QrQwZncorYNia12e3Q/edit?usp=sharing).\n\n## What's next?\n\nA logical place to start is by improving discussion within issues. Design proposals are available in issue descriptions, shared and discussed in comments, and it's not always clear which is the latest version. While we have the option to mark and [comment on specific image spots in the blob view and merge requests](https://docs.gitlab.com/ee/user/discussions/#image-discussions), the actual design collaboration happens much earlier in the process.\n\nOne idea is to make design artifacts a first-class citizen by linking to design assets in the side navigation of an issue. We could allow for commenting on images and propagate these comments in the sidebar for focused and cohesive discussion.\n\nWe want to know what you think! 
You can take a look at and comment on the [design artifacts discovery issue here](https://gitlab.com/gitlab-org/gitlab-ce/issues/53587).\n\n[Photo](https://www.pexels.com/photo/notes-clean-whiteboard-board-7067/) by [Startup Stock Photos](https://www.pexels.com/@startup-stock-photos) on Pexels.\n{: .note}\n",[811,9,700,1698,1440],{"slug":2672,"featured":6,"template":680},"gitlab-for-designers","content:en-us:blog:gitlab-for-designers.yml","Gitlab For Designers","en-us/blog/gitlab-for-designers.yml","en-us/blog/gitlab-for-designers",{"_path":2678,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2679,"content":2685,"config":2690,"_id":2692,"_type":14,"title":2693,"_source":16,"_file":2694,"_stem":2695,"_extension":19},"/en-us/blog/gitlab-for-the-non-technical",{"title":2680,"description":2681,"ogTitle":2680,"ogDescription":2681,"noIndex":6,"ogImage":2682,"ogUrl":2683,"ogSiteName":667,"ogType":668,"canonicalUrls":2683,"schema":2684},"GitLab 101 – a primer for the non-technical","If a set-in-her-ways English major can conquer the GitLab product and culture, you can too. Here’s what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678544/Blog/Hero%20Images/gitlab101.jpg","https://about.gitlab.com/blog/gitlab-for-the-non-technical","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 101 – a primer for the non-technical\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-08-02\",\n      }",{"title":2680,"description":2681,"authors":2686,"heroImage":2682,"date":2687,"body":2688,"category":808,"tags":2689},[869],"2019-08-02","\nI am living proof it’s possible to work at GitLab and not be particularly technical, or even particularly quick about learning technical things. 
Three months ago I joined the company having never used the tool and with no idea what a merge request or an issue was. I’d never touched Git or pushed a commit, and I certainly had never owned a laptop with Docker on it.\n\nIf you’re like me, fear not. Here’s everything you need to know to jump right in.\n\n## It’s an issue\n\nLet’s start with the thing that confused me the most in the first weeks – issues. An [issue](/handbook/communication/#issues) is something you create if you want to start an initiative, or simply keep track of an idea. Derived from the software development space (obviously), it’s like the starting point in any work-related conversation. Have a great idea for a new GitLab feature? Open an issue. Have an idea for a marketing campaign? Start an issue. Anyone can chime in on your issue and it becomes a place to not only have a conversation but also to keep track of the conversation. At GitLab we call all that “chiming in” collaboration. [Collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) is central to the company’s culture and our mission [“everyone can contribute.”](/blog/how-do-you-contribute/) Issues are sort of the file folders we store all that collaboration in. (And, because you might hear this term and wonder about it, as I did...an [“epic”](https://docs.gitlab.com/ee/user/group/epics/) is a collection of related issues, sort of how a filing cabinet holds file folders, to use a very old school analogy.)\n\n## Lanes merge ahead\n\nA [merge request](/handbook/communication/#start-with-a-merge-request) is a formalized way to request something (usually in the [GitLab handbook](/handbook/) or [blog](/blog/)) be created or changed. Creating a merge request triggers GitLab.com to rebuild the entire website (which is both cool and sort of scary the first few times you do it). 
When you submit a merge request you’ll get a message that says the pipeline is running, meaning the process of rebuilding the entire website has begun. That’s not a small undertaking, so it can take 15 minutes, or more, for your merge request to go through. If it does go through, you’ll get a message that says “passed with warnings!” Ignore the “warnings” – builds always pass with warnings. These warnings are usually not relevant if you're not contributing code. The key thing is it passed. (Speaking from personal experience, refreshing the page or simply staring at the “pipeline running” message doesn’t actually make it go faster.)\n\nNotice the term is merge *request.* That means once it’s passed you’ll need to ask someone who has magical merging powers to actually merge it (usually your manager). You do that by assigning the request to them (top right of the MR form) and leaving them a comment asking them to do so.\n\n## All aboard\n\nYou’ll get a big [onboarding](/handbook/people-group/general-onboarding/) issue on day one. Do not panic. Take your time. And realize that some of what you’re doing will only make sense in a month, or even a few months (like all that time I spent downloading Git).\n\nMost of the onboarding tasks are very straightforward and helpful. But ultimately you’ll have to add yourself to the [team page](/company/team/), creating your first merge request in the process. Anything involving the team page can be very tricky because it is based on `.yml` files (cranky, touchy things that are pronounced a little like the vegetable, “yaml”) so do not be afraid to ask for help. The #mr-buddies, #git-help, or #questions channels in Slack can be great resources. You’ll want to remember to use “command F” to search through the hundreds of files on the team page to find your entry.\n\nDon’t worry – no matter how much of a struggle it is to add yourself to the team page, you’re unlikely to actually “break” anything on [about.gitlab.com](/). 
(I’ll freely admit it took me *several days* to accomplish this one task… )\n\n## Communication\n\nIn an all-remote company, communication is vital. But *how* to communicate at GitLab doesn’t necessarily come naturally to someone like me who came from an email and phone call culture. Our communication methods are [spelled out in the handbook](/handbook/communication/#introduction), but here’s the quick version: You want to communicate primarily within GitLab. That means within an issue – tag someone with their GitLab “handle” (@vsilverthorne as an example) – in the discussion box. Or the same thing can happen in a merge request. Whoever you tag will get a notification in their To-do list on GitLab, and may also be notified via email. But speaking as someone who’s been pointed in the right direction after using Slack or email instead of GitLab, trust me when I say _within_ GitLab is the first and best way to communicate.\n\nIf it’s urgent, [Slack](/handbook/communication/#slack) can be a good choice. Slack is also a great place to ask questions, chit-chat with colleagues and/or share common interests. GitLab has lots of groups on Slack for everything from crafty people to gardeners. Email is the last choice because much of the company checks it only occasionally.\n\n## Meetup IRL or virtually\n\nThe [video call on Zoom](/handbook/communication/#video-calls) is another key GitLab practice and although I was a little skeptical it could be more effective than a phone call, I’m now a convert. Not only do you get to know people better because you can see them, the ability to screen share is invaluable, particularly when you’re learning something new. I never feel “camera ready” though, so if you feel that way, you’re far from alone. Luckily, there is a function on Zoom called \"Touch up my appearance.\" It's like FaceTune for the workplace instead of Instagram. 
Just go into Zoom>Preferences>Video and under My Video check \"Touch up my appearance.\" This way your dark circles won't be making an appearance in the latest video on [GitLab Unfiltered](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A).\n\nIf meetups are possible in real life, I’d suggest those too. At an all-remote company you do have to put time and energy into feeling like you’re part of the team.\n\nAre there other challenges you’ve encountered when you were brand new to GitLab that would have been helped by a clearer or more detailed explanation? Let us know and we’ll update this blog post (and the handbook).\n\nCover image by [Charlotte Karlsen](https://unsplash.com/@charlottemsk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,1297,832],{"slug":2691,"featured":6,"template":680},"gitlab-for-the-non-technical","content:en-us:blog:gitlab-for-the-non-technical.yml","Gitlab For The Non Technical","en-us/blog/gitlab-for-the-non-technical.yml","en-us/blog/gitlab-for-the-non-technical",{"_path":2697,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2698,"content":2704,"config":2709,"_id":2711,"_type":14,"title":2712,"_source":16,"_file":2713,"_stem":2714,"_extension":19},"/en-us/blog/gitlab-hero-devops-platform",{"title":2699,"description":2700,"ogTitle":2699,"ogDescription":2700,"noIndex":6,"ogImage":2701,"ogUrl":2702,"ogSiteName":667,"ogType":668,"canonicalUrls":2702,"schema":2703},"How a GitLab engineer changed the future of DevOps","When Kamil Trzciński suggested we integrate GitLab version control and GitLab CI one into a single product, GitLab's pioneering DevOps Platform was born.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681121/Blog/Hero%20Images/whatisgitlabflow.jpg","https://about.gitlab.com/blog/gitlab-hero-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"How a GitLab engineer changed the future of DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-10-29\",\n      }",{"title":2699,"description":2700,"authors":2705,"heroImage":2701,"date":2706,"body":2707,"category":675,"tags":2708},[762],"2020-10-29","\n\nJust recently, Gartner recognized [DevOps Value Stream Delivery](/solutions/value-stream-management/) Platforms as an emerging category in the software marketplace by publishing the new [Market Guide for DevOps Value Stream Delivery Platforms](https://page.gitlab.com/resources-report-gartner-market-guide-vsdp.html) (what we're calling a DevOps Platform). The Gartner report may not include the name \"Kamil Trzciński,\" but I want to recognize his contributions to this DevOps Platform category. If it weren't for his idea, we wouldn't have [launched GitLab as an all-in-one, single DevOps application](/blog/gitlab-master-plan/). It's a product that changed how engineers build software.\n\n**[[Learn more about our journey to the DevOps Platform](/blog/the-journey-to-a-devops-platform/)]**\n\nIt all started in 2015 with a GitLab runner that was built by one of the contributors from the wider community, [Kamil Trzciński](/company/team/#ayufan), who is now a distinguished engineer, Ops and Enablement, at GitLab. He wrote a runner that was faster, easier to run in parallel, easier to install, and easier to contribute to. We liked his runner so much that we deprecated ours to use his, and asked him to join our engineering team.\n\nAt that time, GitLab had two products: [GitLab Source Code Management](/solutions/source-code-management/) (SCM) and [GitLab Continuous Integration](/features/continuous-integration/) (CI). We were a DevOps company, but one with two key products that worked well together with some overlaps in code. 
Then Kamil made a suggestion that changed our company and has now defined a category: \"Why don't you combine the two to make GitLab a single application?\"\n\n[Dmitriy Zaporozhets](/company/team/#dzaporozhets), GitLab co-founder, thought there was no need to do it because the products were already perfectly integrated. And my gut reaction was no. Many of our customers were already building their own, DIY DevOps platforms with multiple tools. Combining GitLab SCM and GitLab CI would mean they got two tools where they expected only one. Our customers didn't seem to want an all-in-one tool, so why would we build it?\n\nBut as Kamil pointed out, there is a considerable amount of overlap between GitLab SCM and GitLab CI, and our engineers and users were spending a lot of development time and effort in managing functions and libraries that appeared in both technologies. In the end, we realized that it actually made a lot of engineering sense to build an all-in-one [DevOps platform](/solutions/devops-platform/). At first, our customers weren't sure about it – some even asked us to turn the CI function off in GitLab SCM because their engineers started using that over their official CI solution. But once we explained how much more efficient this made their application building efforts, they were sold. GitLab all-in-one meant one data store, fewer clicks, less context, and more efficiency in their application development processes. Kamil's idea was brilliant. Our developers were able to save development effort and didn't have to hop around between tools, same with the developers and operators who use GitLab to build their applications.\n\nWe wouldn't be where we are today if we didn't welcome the contributions of everyone in our globally distributed, open source software community. 
Just think, within one year, Kamil went from being a GitLab contributor who wanted to learn Go, to building a GitLab runner that blew us away, to redefining the **entire business strategy for our company**. It goes to show that companies are smarter when everyone can contribute.\n\nWatch the video below to hear Kamil describe how he came to join GitLab and made a proposal that went on to define the DevOps Platform category.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/CiJOTlU3wWs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_Gartner, Market Guide for DevOps Value Stream Delivery Platforms, Manjunath Bhat, Hassan Ennaciri, Chris Saunderson, Daniel Betts, Thomas Murphy, Joachim Herschmann, 28 September 2020_\n\n**[[Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)]**\n\n_Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner's research organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose._\n\nCover image by [Fabio Bracht](https://unsplash.com/@bracht?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/_z0DiiaIhB4)\n{: .note}\n",[1440,267,9],{"slug":2710,"featured":6,"template":680},"gitlab-hero-devops-platform","content:en-us:blog:gitlab-hero-devops-platform.yml","Gitlab Hero Devops Platform","en-us/blog/gitlab-hero-devops-platform.yml","en-us/blog/gitlab-hero-devops-platform",{"_path":2716,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2717,"content":2723,"config":2729,"_id":2731,"_type":14,"title":2732,"_source":16,"_file":2733,"_stem":2734,"_extension":19},"/en-us/blog/gitlab-identified-by-gartner-as-eapt-visionary",{"title":2718,"description":2719,"ogTitle":2718,"ogDescription":2719,"noIndex":6,"ogImage":2720,"ogUrl":2721,"ogSiteName":667,"ogType":668,"canonicalUrls":2721,"schema":2722},"GitLab named a ‘Visionary’ in 2019 Gartner Enterprise Agile Planning Tool Magic Quadrant","We're happy to announce GitLab has been named a 'Visionary' in Gartner's EAPT Magic Quadrant.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680619/Blog/Hero%20Images/construction-blueprint.jpg","https://about.gitlab.com/blog/gitlab-identified-by-gartner-as-eapt-visionary","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named a ‘Visionary’ in 2019 Gartner Enterprise Agile Planning Tool Magic Quadrant\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-05-22\",\n      }",{"title":2718,"description":2719,"authors":2724,"heroImage":2720,"date":2726,"body":2727,"category":299,"tags":2728},[2725],"John Jeremiah","2019-05-22","\n\nGartner recently named [GitLab a 
‘Visionary’](/analysts/gartner-eapt21/) in their Magic Quadrant research into Enterprise Agile Planning Tools. We believe that planning and delivery must be closely linked to enable product and project teams to streamline and accelerate delivery. In many organizations, disconnected tools create organizational islands, preventing teams from collaborating, sharing, and learning. Our vision for Concurrent DevOps is to enable teams to:\n\n- Bridge the gaps between PMs, Developers, Ops, and Security.\n- Build and manage their epics and roadmaps.\n- Prioritize work and organize sprints and Kanban boards to track the development and delivery of value to customers.\n\n## Everyone can contribute\n\nOur vision is to make it simple, easy, and fast for people to contribute and deliver value to their users. We believe that a [single application](/handbook/product/single-application/), preconfigured to work by default across the DevOps lifecycle, will enable faster cycle time, delivering innovation and value.\n\n## Easier workflows, increasing collaboration and productivity\n\nEnterprise Agile and Planning are critical activities that often determine the overall success of a project. Teams must work on the right things at the right time, and unless your planning processes are linked to your delivery actions, the potential for a disconnect is remarkably high.\n\nAt Hemmersbach, using GitLab helped them decrease the time from planning to production by 6.5 days. Working in a single environment, they are also achieving 60 builds per day where previously they were performing a single daily build.\n\n>“GitLab is the one tool that connects our whole team. You always see GitLab open and everything is based on GitLab. GitLab is the backbone of our software development.”  – Alexander Schmid, Head of Software Development, Hemmersbach\n\nGitLab solves the disconnect by enabling Enterprise Agile Planning within the same application that is used to manage the development and delivery.  
Now, [Product Managers and Project Managers](/solutions/agile-delivery/) can groom their backlog and epics, build their roadmaps, and plan sprints without losing touch with the actual development and delivery flow. Kanban boards provide a visual and interactive way to manage the status and flow of issues through delivery.\n\n![burndown](https://about.gitlab.com/images/home/burndown-chart.png){: .shadow.medium.center}\n\n[Value Stream Management](/solutions/value-stream-management/) offers insight into planning and delivering projects so that teams can find and remove bottlenecks from their value stream.\n\nDownload the report and learn more about why Gartner named GitLab an Enterprise Agile Planning 'Visionary.'\n\n[Download the full report](/analysts/gartner-eapt21/)\n{: .alert .alert-gitlab-purple .text-center}\n\nGartner, Magic Quadrant for Enterprise Agile Planning Tools, 18 April 2019, Keith Mann, Mike West, Thomas Murphy, Nathan Wilson\n{: .note}\n\nGartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, express or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose.\n{: .note}\n\nImage by \u003Ca href=\"https://pixabay.com/users/pisauikan-4552082/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">pisauikan\u003C/a> from \u003Ca href=\"https://pixabay.com/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">Pixabay\u003C/a>\n{: .note}\n",[831,1440,9,675],{"slug":2730,"featured":6,"template":680},"gitlab-identified-by-gartner-as-eapt-visionary","content:en-us:blog:gitlab-identified-by-gartner-as-eapt-visionary.yml","Gitlab Identified By Gartner As Eapt Visionary","en-us/blog/gitlab-identified-by-gartner-as-eapt-visionary.yml","en-us/blog/gitlab-identified-by-gartner-as-eapt-visionary",{"_path":2736,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2737,"content":2743,"config":2750,"_id":2752,"_type":14,"title":2753,"_source":16,"_file":2754,"_stem":2755,"_extension":19},"/en-us/blog/gitlab-is-an-sca-contender",{"title":2738,"description":2739,"ogTitle":2738,"ogDescription":2739,"noIndex":6,"ogImage":2740,"ogUrl":2741,"ogSiteName":667,"ogType":668,"canonicalUrls":2741,"schema":2742},"GitLab is named a Challenger in The Forrester Wave™: Software Composition Analysis, Q2 2019","GitLab has been recognized by analysts as a challenger in Software Composition Analysis.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669950/Blog/Hero%20Images/security-cameras.jpg","https://about.gitlab.com/blog/gitlab-is-an-sca-contender","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is named a Challenger in The Forrester Wave™: Software Composition Analysis, Q2 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": 
\"2019-04-12\",\n      }",{"title":2738,"description":2739,"authors":2744,"heroImage":2740,"date":2746,"body":2747,"category":299,"tags":2748},[2745],"Cindy Blake","2019-04-12","\n\nWhile GitLab is best known in the traditional DevOps space, we have also begun to grow out our expertise in application security, which may come as a [surprise to security professionals](https://www.linkedin.com/pulse/ciso-cheat-sheet-git-cindy-blake-cissp), who may not have encountered us previously. We may have started out focused on traditional developer tools, however, as GitLab has added capabilities to cover the entire Software Development\nLifecycle (SDLC), this now includes not only a market-leading [Continuous Integration](/blog/gitlab-leader-continuous-integration-forrester-wave/)\nsolution but also, more recently, integrated [application security testing built into the CI/CD pipeline](/solutions/security-compliance/).\nOur single, end-to-end application enables security testing that is tightly aligned to today’s\nrapid, [iterative cycles of DevOps](/solutions/security-compliance/) development and the modern\ninfrastructure that accompanies cloud native applications.\n\n## Who was included?\n\nFor The Forrester Wave™: Software Composition Analysis, Q2 2019, participating vendors were required to\nhave most of the following capabilities out of the box:\n- Ability to provide remediation advice on both open source license risk and vulnerabilities;\n- Ability to integrate into SDLC automation tools;\n- Ability to provide proactive vulnerability management;\n- Ability to edit and create policies; and\n- Ability to visually report on open source risk.\n\nParticipating vendors were also required to have more than $10M in revenue and have\ninterest from Forrester clients or relevance to them.\n\n## GitLab is a new challenger\n\nHaving only added security capabilities in December 2017, GitLab has been excluded from\nother analyst application security reports that only look at more 
established players.\nIn our first official security-oriented analyst evaluation, we are excited not only to get the\nword out about GitLab’s security capabilities, but also to have this opportunity for analyst\nfeedback and insight into how GitLab compares. We take to heart not only areas where we\nshine – but also where improvement is needed. With GitLab,\n“[everyone can contribute](/community/contribute/),” and the feedback gained from\nForrester is another valuable contribution. We also welcome [your participation](/community/contribute/) and invite you to help us\nunderstand what you would like to see as our security capabilities grow.\n\nBased on this analyst report and analyst interaction feedback, we are already addressing improvement opportunities in our\n[roadmap](/direction/secure/#upcoming-releases) and [vision](/direction/secure/#direction).\n\n**Check out our [complete SCA response](/analysts/forrester-sca/) for links to specific updates and response comments.**\n\nAs a company dedicated to releasing incrementally, delivering first on breadth and then\non depth, it is not uncommon for GitLab to initially place in more of a challenger position,\nas our feature set generally does not have the same maturity as established players in the space.\nHowever, when GitLab enters a space, we do so boldly, with clear intentions and a solid strategy.\nGitLab’s strategy for application security testing and software composition analysis focuses\nmore equally on both the developer and the security professional than traditional solutions.\nYou will find some areas in strategy where we were not scored as highly as we believe we\nshould be, due to our more aggressive focus on development.\n\n## Updates since the evaluation\n\nGitLab has shipped a [major new release every month](/releases/categories/releases/)\nfor 90 consecutive months. 
Forrester evaluated GitLab 11.6 for this report while versions\n[11.7](/releases/2019/01/22/gitlab-11-7-released/), [11.8](/releases/2019/02/22/gitlab-11-8-released/), and\n[11.9](/releases/2019/03/22/gitlab-11-9-released/) have since been released. You will find several features\nthat Forrester felt were lacking have already been added, including improvements to the\nsecurity dashboard, additional languages added to SAST scanning, and secrets detection.\nWhen using Forrester’s scoring tool, be sure to adjust the criteria for our current capabilities.\nA list of what’s been added since Forrester’s evaluation can be found on our [complete SCA response](/analysts/forrester-sca/).\n\n## Forrester’s key takeaway: “Remediation, policy management, and reporting are key differentiators”\n\nForrester says, “As developers continue to use open source to accelerate the release of new\napplication functionality, remediation, policy management, and reporting will dictate which\nproviders will lead the pack. Vendors that can provide developers with remediation advice\nand even create patches position themselves to significantly reduce business risk.”\n\nThis takeaway is closely aligned with GitLab's [vision for application security testing](/direction/secure/#direction)\nand our work in progress for [auto remediation](https://gitlab.com/groups/gitlab-org/-/epics/133). While not available in the evaluated version (11.6), today’s GA release, (11.9), [can detect a more current patch available](/releases/2019/03/22/gitlab-11-9-released/#vulnerability-remediation-merge-request) and\nenable the developer to create a [new branch and apply the patch](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#create-a-merge-request-from-a-vulnerability)\nwith one click. 
Upcoming versions will [automatically run the pipeline and present the results](https://gitlab.com/groups/gitlab-org/-/epics/275) to the developer to accept or reject.\nBy automating remediations that are readily apparent, developers and security can focus on\nvulnerabilities whose remediation is not as straightforward.\n\nThe fact that GitLab is a [single application](/) for the entire SDLC enables us to take\nremediation even further – actually running the pipeline in a separate branch,\neven [measuring the performance impact](https://gitlab.com/gitlab-org/gitlab-ee/issues/9382)\nof the patch. We isolate the cause and effect: the developer makes a code change, that code is\ntested and they see the results before merging the code with others’. It also allows us to do [Dynamic scanning](https://docs.gitlab.com/ee/user/application_security/dast/) in the same manner, before the\ncode is merged with anyone else’s. We do this by spinning up a\n[review app](https://docs.gitlab.com/ee/ci/review_apps/) in the pipeline report.\nThis fully functioning app reflects the developer’s code changes and can be used for user testing,\nperformance testing, and dynamic app security scanning.\n\n## GitLab's advice\n\nWe believe GitLab is ideal for enterprises who are:\n\n* Using GitLab for CI/CD.\n* Practicing iterative development via DevOps.\n* Using containers and serverless.\n\nFor the enterprise that has not invested in app sec tools, GitLab can quickly provide\nscanning, often necessary for regulatory compliance, with a single application.\nGitLab offers SAST, DAST, Dependency, Container Scanning, and License Management [with one app](/stages-devops-lifecycle/application-security-testing/) – no need to evaluate and buy from multiple vendors, then stitch together integration with the DevOps toolchain. 
In fact, GitLab customer, [Glympse Inc.](https://glympse.com/),\nstood up 40 repos with automated security testing, using all of the GitLab scans, in less time\nthan they could have installed just the individual tools – and as a bonus, they impressed their\nauditors with their process.\n\nFor the enterprise already deeply invested in traditional app sec tools, GitLab affords a\nbroader and [earlier scanning effort](/solutions/security-compliance/), using a tool that\ndevelopers are already using. GitLab can scan every code change, much the way that\nevery airplane passenger gets scanned through security. Save the deeper scans for\nlater and/or less frequent evaluation by the security team. Consider using GitLab on select\nprojects to experience the more efficient workflow and potentially reduce your scanning costs from costlier tools.\n\n## Our response\n\n We invite you to see our [complete response](/analysts/forrester-sca/), and as always, welcome\n [your contributions](/community/contribute/)!\n\n Cover image by [Scott Webb](https://unsplash.com/@scottwebb) on [Unsplash](https://unsplash.com/photos/yekGLpc3vro)\n{: .note}\n",[2749,9,745,675,720,722],"cloud native",{"slug":2751,"featured":6,"template":680},"gitlab-is-an-sca-contender","content:en-us:blog:gitlab-is-an-sca-contender.yml","Gitlab Is An Sca Contender","en-us/blog/gitlab-is-an-sca-contender.yml","en-us/blog/gitlab-is-an-sca-contender",{"_path":2757,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2758,"content":2764,"config":2770,"_id":2772,"_type":14,"title":2773,"_source":16,"_file":2774,"_stem":2775,"_extension":19},"/en-us/blog/gitlab-issue-bash-june-2018",{"title":2759,"description":2760,"ogTitle":2759,"ogDescription":2760,"noIndex":6,"ogImage":2761,"ogUrl":2762,"ogSiteName":667,"ogType":668,"canonicalUrls":2762,"schema":2763},"Join GitLab's June Issue Bash","Join us thin June and help us squash some of the open issues in the GitLab Community Edition 
tracker!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680123/Blog/Hero%20Images/gitlab-issue-bash-june-2017-cover.png","https://about.gitlab.com/blog/gitlab-issue-bash-june-2018","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Join GitLab's June Issue Bash\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Fletcher\"}],\n        \"datePublished\": \"2018-05-09\",\n      }",{"title":2759,"description":2760,"authors":2765,"heroImage":2761,"date":2767,"body":2768,"category":299,"tags":2769},[2766],"Mark Fletcher","2018-05-09","\n\nThis June we'll be holding another of our quarterly issue bashes to allow the community to get involved in helping to squash some issues in the GitLab Community Edition issue tracker. We have over 1,900\n[GitLab contributors](http://contributors.gitlab.com/),\nand we are always looking for more people to join in and contribute to the project in any way that they can.\n\n\u003C!-- more -->\n\nOf course, some lucky contributors will be rewarded with awesome swag! 🙌\n\nCheck out [the Issue Bash landing page](/community/issue-bash/) for all the information about how it works. 
Please take a look there and provide any feedback to our [feedback project](https://gitlab.com/gitlab-org/issue-bash/feedback) or propose changes directly [here](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/source/community/issue-bash/index.html.haml)!\n\n## When is it going to happen?\n\nWe'll kick it off at 00:01 UTC on Saturday, **June 2nd**\nand will keep it up until 23:59 UTC on Sunday, **June 3rd**.\n\n## Who can contribute?\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n&nbsp;&nbsp;\n**At GitLab, everyone can contribute!**\n&nbsp;&nbsp;\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nThis is your chance to get involved! Most of the tasks don't require\ntechnical expertise, therefore, non-technical community\nmembers are definitely welcome and prize worthy!\n\n## How do you get involved?\n\nPlease see [the FAQ on the Issue Bash landing page](/community/issue-bash/#bash-q-a) to learn more about how to get involved.\n\n## Prizes\n\nAs prizes, we have some awesome swag available:\n\n- 14 T-shirts\n- 1 T-shirt and Hoodie for a lucky contributor\n\nUsers making any contributions to the project,\nbetween the start and end times of the event, will be entered into the random draw\nto win a prize. The contributions will be collated after the end of the event and\nprize winners, drawn at random, will be contacted in the weeks that follow.\n\nTo see how we draw winners at random please take a look at the [prize winner calculator project](https://gitlab.com/gitlab-org/issue-bash/prize-winner-calculator).\n\n## Questions? More info?\n\n[GitLab team](/company/team/) and [GitLab core team](/community/core-team/) members will be on hand to answer questions and close issues. 
Please mention them if you need any help or need attention on an issue.\n\n* [@markglenfletcher](https://gitlab.com/markglenfletcher)\n* [@tnir](https://gitlab.com/tnir)\n",[277,9,267,745],{"slug":2771,"featured":6,"template":680},"gitlab-issue-bash-june-2018","content:en-us:blog:gitlab-issue-bash-june-2018.yml","Gitlab Issue Bash June 2018","en-us/blog/gitlab-issue-bash-june-2018.yml","en-us/blog/gitlab-issue-bash-june-2018",{"_path":2777,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2778,"content":2783,"config":2789,"_id":2791,"_type":14,"title":2792,"_source":16,"_file":2793,"_stem":2794,"_extension":19},"/en-us/blog/gitlab-joins-msft-tech-accord",{"title":2779,"description":2780,"ogTitle":2779,"ogDescription":2780,"noIndex":6,"ogImage":1970,"ogUrl":2781,"ogSiteName":667,"ogType":668,"canonicalUrls":2781,"schema":2782},"GitLab joins Cybersecurity Tech Accord","Today we're happy to announce that we're one of 11 companies joining the Cybersecurity Tech Accord.","https://about.gitlab.com/blog/gitlab-joins-msft-tech-accord","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab joins Cybersecurity Tech Accord\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kathy Wang\"}],\n        \"datePublished\": \"2018-06-20\",\n      }",{"title":2779,"description":2780,"authors":2784,"heroImage":1970,"date":2786,"body":2787,"category":299,"tags":2788},[2785],"Kathy Wang","2018-06-20","\n\nGitLab has joined the [Cybersecurity Tech Accord](https://cybertechaccord.org/), along with 10 other companies. 
The Cybersecurity Tech Accord is a consortium of over 40 security-minded tech companies that pledge to work collaboratively to protect our users and customers, guided by a set of [security principles](https://cybertechaccord.org/accord/).\n\nTransparency is and has alway been one of GitLab’s [core values](https://handbook.gitlab.com/handbook/values/#transparency), and we have always strived to make information available to our users and customers. In the security industry, this is an especially challenging line to walk, and we want to contribute not only to the collaborative efforts outlined in the [Cybersecurity Tech Accord](https://cybertechaccord.org/accord/), but to also share our experiences with other tech companies on what worked well for us in the iterative process of making security more transparent.\n\nOur [security team](/handbook/security/) works hard to protect user and customer data. In addition, GitLab is committed to building [security capabilities in our product offerings](/features/) to help our customers improve upon their software development lifecycle process.\n\nAt GitLab, we believe security is everyone’s job. The security industry has relied on collaborations with industry peers because collectively, we are able to obtain richer data and insights about our adversaries, in order to protect our users and customers. 
We look forward to building improved actionable data sharing and collaborative efforts with our tech peers.\n",[9,720,675],{"slug":2790,"featured":6,"template":680},"gitlab-joins-msft-tech-accord","content:en-us:blog:gitlab-joins-msft-tech-accord.yml","Gitlab Joins Msft Tech Accord","en-us/blog/gitlab-joins-msft-tech-accord.yml","en-us/blog/gitlab-joins-msft-tech-accord",{"_path":2796,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2797,"content":2803,"config":2808,"_id":2810,"_type":14,"title":2811,"_source":16,"_file":2812,"_stem":2813,"_extension":19},"/en-us/blog/gitlab-journey-to-cicd",{"title":2798,"description":2799,"ogTitle":2798,"ogDescription":2799,"noIndex":6,"ogImage":2800,"ogUrl":2801,"ogSiteName":667,"ogType":668,"canonicalUrls":2801,"schema":2802},"GitLab's unconventional journey to CI/CD and Kubernetes","How the Delivery team at GitLab used our existing resources to overhaul our system to make way for CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678397/Blog/Hero%20Images/raphael-biscaldi-cicd.jpg","https://about.gitlab.com/blog/gitlab-journey-to-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's unconventional journey to CI/CD and Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-10-03\",\n      }",{"title":2798,"description":2799,"authors":2804,"heroImage":2800,"date":2805,"body":2806,"category":743,"tags":2807},[672],"2019-10-03","\nEngineering teams are under pressure to provide value in the form of new features, all while minimizing [cycle time](/blog/reduce-cycle-time/). Oftentimes the instinct is to adopt modern tooling to make that happen. Continuous integration and delivery (CI/CD) is baked into GitLab, our single application for the DevOps lifecycle, and we are undergoing a major migration to Kubernetes to speed up our cycle time even more. 
But our journey to CI/CD and eventually Kubernetes has been unconventional, as the [Delivery team](/handbook/engineering/infrastructure/team/delivery/) elected to stress our current system as we step into [continuous delivery](/topics/continuous-delivery/) on GitLab.com before migrating entirely over to Kubernetes.\n\n## Releases before CI/CD\n\nThe wider GitLab community and GitLab team members [averaged 55 commits per day between Aug. 7 and Sept. 27, 2019](https://gitlab.com/gitlab-org/gitlab-ee/-/graphs/master/charts) as they continually iterate on our product to build new features for our customers. But before we adopted continuous delivery, we had to institute feature freeze periods beginning on the 7th of each month. During this period, engineers would shift their focus from building new features to fixing bugs in preparation for the upcoming release, which always happens on the 22nd.\n\n The use of a specific defined deadline encouraged behavior that ultimately caused developers to focus more on the due date and not around accomplishing the work.\n\n\"... developers would really play around the 7th because they would think ‘Oh, I have time, the 7th is in seven days,’ and then on the 6th at midnight they would panic merge things,\" said [Marin Jankovski](/company/team/#marin), engineering manager for the Delivery team. \"Because they know that if they missed this deadline they will have to wait for the next month, and if they get it in under this deadline they have a good two weeks to fix any problems that happen.\"\n\nSince the conception of GitLab.com, the feature freeze was used as a stabilization period, Marin explained.\n\nSoon though, the demand for new features from new users was pushing us to escalate our development velocity on GitLab.com. 
The stabilization period slowed our cycle time and created a significant drag in our turnover time for bug fixes, regression, and feature shipping for users both on GitLab.com and self-managed customers.\n\n“In some cases (the feature freezes) would even cause platform instability due to the fact that highest priority fixes couldn't find its way into customer hands quick enough,” said Marin. “By moving to CD, we can get both features and bug fixes alike into the hands of our users much quicker.”\n\nBefore the [Delivery team was created to manage GitLab.com's transition to continuous delivery](/handbook/engineering/infrastructure/team/delivery/#top-level-responsibilities) – and eventually Kubernetes – we depended upon a [release manager](/blog/release-manager-the-invisible-hero/), a rotating position among developers, to prepare the release. The [release process was iterated on over a five-year period](/community/release-managers/) as the release managers created a knowledge base and some automation to make the release process work.\n\nBut this method was inefficient as the timing behind the deployment process and release preparations was unpredictable, taking between half a day to multiple days due to the [accumulation of manual tasks in the process](https://gitlab.com/gitlab-org/release/docs/blob/master/general/tooling.md).\n\n“The release manager would get a set task list to go through, a deadline by which the tasks should be completed and they would have to repeat these steps over again until the release is ready, but also stable on GitLab.com,” explained Marin. 
At the highest level overview, the release manager had to:\n\n*   Manually sync the various repositories that GitLab consists of\n*   Ensure that the correct versions are set in the manually created Git branches\n*   Once the release is tagged, manually deploy to GitLab.com environments for both non-production and production\n*   Verify that everything is operational and manually publish the packages for self-managed users\n\nDuring his [presentation on this topic at GitLab Commit Brooklyn](https://youtu.be/lD-cYylwOLg), Marin shared the results of a 2018 survey which revealed that in the 14-day period before a release, the Delivery team spent 60% of their time babysitting deploys, and another 26% of their time on manual or semi-manual tasks release tasks, such as writing the monthly release post.\n\n![Task breakdown before CI/CD](https://about.gitlab.com/images/blogimages/journey-to-cicd/release-task-spread.jpg){: .medium.center}\nResults of a 2018 survey showing how the Delivery team spent their time two weeks before a release, before continuous delivery.\n{: .note.text-center}\n\n\"If you take a look at the whole thing, in 14 days, in two weeks, my team did nothing but sit on the computer and watch, well, paint dry, I guess,\" said Marin.\n\nBut by tackling 86% of the pie (60% deploys + 26% of the release manual tasks), the Delivery team could solve a few problems:\n\n1.  No release delays\n1.  Repeatable and faster deploys to enable no downtime\n1.  More time for our GitLab.com Kubernetes migration\n1.  More space to prepare the organization for continuous delivery\n\nAlthough CD is only on GitLab.com, our self-managed customers also benefit from our transition to CD. Now anything that isn't caught with CI testing is tested automatically and manually in environments before ever reaching GitLab.com. 
Anything that requires a fix that does reach GitLab.com can be fixed in a few hours, so the final release for self-managed customers won't include these particular issues.\n\n## Our unique approach to transitioning to CD and Kubernetes\n\nThe transition from using feature freezes to adopting CD on GitLab.com was inevitable as our features set grew, and a team of engineers, led by Marin, was formed to oversee this transition: “The Delivery team has been formed with the sole purpose of moving the company to a CD model for GitLab.com but at the same time for migrating GitLab.com to the Kubernetes platform to enable easier scaling and even faster turnaround times.”\n\nMany companies in GitLab’s position would have started this journey to CI/CD and Kubernetes by first integrating the new technologies into their workflow, and amending the development process as they go. We opted for a different approach.\n\nThe migration to Kubernetes requires a shift in both production systems and the engineering mindset, explained Marin. Kubernetes offers some features that teams can easily leverage without any extra investment. But in order to derive the greatest value from the free features Kubernetes offers, there ought to be some existing CI/CD process already in place.\n\nThe Delivery team recognized that in order to smooth the transition to Kubernetes for continuous delivery, our engineers must already be working with a CI/CD mindset – this includes a strong focus on quality assessments (QA) and stricter feature planning. So the Delivery team went with the [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) and used our existing tools to build a CD system and reorganize the application infrastructure of GitLab.com instead of first adopting new tooling and technologies for CD.\n\n“The idea was simple,” said Marin. 
“We [leverage the tools at our disposal](https://gitlab.com/gitlab-org/release/docs/blob/master/general/deploy/auto-deploy.md), automate most of the manual tasks and ‘stress test’ the whole static system. If the static system can withstand the test, we move toward a more dynamic test.”\n\nThere were two key benefits to taking this approach:\n\n**First**, any weaknesses in our application were exposed and stabilized by automating with CI, so our application is stronger and less brittle, making a complete migration to Kubernetes more likely to be a success.\n\n**Second**, by shifting the engineering team to the CD mindset, we created a cultural shift among the engineers at GitLab who were accustomed to weekly deploys and waiting up to a day to see the impact of their merge.\n\n> “The definition of ‘done’ for developers has changed since the adoption of CI/CD,” said Marin.\n\nBefore CI/CD, a change was “done” once the review was completed. This was excluding deployments to various environments which took a considerable amount of time. Today, deployments are shipped within hours so there is no reason to not confirm that a change is working in testing and production environments.\n\nThe adoption of review apps on Kubernetes allow developers to run QA checks in virtually real time, and the use of [feature flags](/blog/feature-flags-continuous-delivery/) for progressive delivery also helps to accelerate development.\n\n“Since the first step in CD, developers are required to react to any automated QA but also carry out another level of manual verification in both non-production and production environments. Additionally, developers can have their changes running in production within a day compared to multiple days (and weeks).”\n\nEveryone can run QA checks on their code more frequently with CD. 
Because code changes are shipped around the clock with our CI/CD system, developers now operate an on-call rotation to help with any outstanding issues that are happening live on GitLab.com since the \"incubation\" time is much shorter.\n\n## Our new method\n\nSince the adoption of a CI/CD system, 90% of the [release process is automated](https://gitlab.com/gitlab-org/release/tasks/issues/885) using the [CI features of GitLab](/direction/verify/continuous_integration/). The remaining 10% requires human intervention due to coordination between various stakeholders.\n\n“We are slowly reducing those 10% as well with the goal of having only approvals needed to publish a release,” said Marin. [In the current iteration, the CI/CD process operates as follows](/direction/ops/):\n\n*   CI automatically looks for specific labels in merged MRs, applied by code reviewers and developers.\n*   CI automatically syncs all required repositories but also creates required Git branches, tags, as well as setting the correct versions of the release we want to ship.\n*   When the builds complete, packages are automatically deployed to non-production environments.\n*   Automated QA tasks are executed and, if passing, the deployment is rolled out to a small subset of users in production.\n*   In parallel, developers do another level of manual QA to ensure that new features are functioning as expected.\n*   If a high severity issue is discovered with manual verification, the deployments are stopped.\n*   When the above is completed, a member of the Delivery team will trigger a rollout to all users on GitLab.com.\n*   Self-managed release is then created from the last known working deployment running on GitLab.com.\n\nAs is true for any engineering team, scaling remains a challenge for us. But one of the biggest technical challenges is making sure there is enough QA coverage, which can be labor intensive for a product as big at GitLab.com. 
Also, making sure the monitoring and alerting is sufficient so the product isn’t operating solely based upon pre-set rules.\n\nThe second major challenge is the complexity of our GitLab.com system, and communicating the change in process across our engineering teams. “Dismantling more than five years of built-up process and habit is never easy,” said Marin.\n\n## The results\n\nGitLab is already benefitting from the shift to CI/CD in a number of ways.\n\nThe results of a new 2019 survey assessing how the Delivery team spends their time in the same 14-day period before the release shows that today, 82% of the team's time is freed up to work on other important tasks.\n\n![Task breakdown since CI/CD](https://about.gitlab.com/images/blogimages/journey-to-cicd/chart.jpg){: .medium.center}\nThe results of a 2019 survey measuring the same two weeks before the release shows the switch to CD has freed up valuable developer time.\n{: .note.text-center}\n\nBy automating manual tasks, the Delivery team was able to shift their focus toward changing the GitLab.com infrastructure to better support our development velocity and user traffic, as well as beginning the migration to Kubernetes.\n\n> \"And, did I mention, none of this is on Kubernetes. All of this is using our 'old' legacy system,\" said Marin to the GitLab Commit Brooklyn audience. \"But what happened with this is we bought ourselves time, so my team actually has time to work on the migration. But one of the biggest changes that happened was in the habits of the engineering organization.\"\n\nThe results since the shift have been significant. 
The Delivery team went from around seven deploys under the old system in May 2019 to 35 deploys on GitLab.com in August 2019, and is on track to surpass these numbers considerably now that they're shipping multiple deploys a day.\n\n“We have just completed the migration of our Registry service to Kubernetes and if you use [Container Registry on GitLab.com](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/70), all your requests are served from the Kubernetes platform,\" said Marin. \"Since GitLab is a multi-component system, we are continuing to isolate and migrate other services.”\n\nNew CI/CD features are included in each release. For example, in our 12.3 release, we [expanded the GitLab Container Registry to allow users to leverage CI/CD to build and push images/tags to their project](/releases/2019/09/22/gitlab-12-3-released/#remove-container-images-from-cicd) among other exciting new features.\n\n## Transitioning your system to continuous delivery?\n\nFor companies considering the transition to CD, Marin advised to start with what you’ve got.\n\n“From my perspective, waiting for migrating to a new platform is the real ‘enemy,’” said Marin. “Most systems can be altered in some ways to enable faster turnaround time without migrating to a fully new system. 
Speeding up the development/release cycle has multiplier return per engineer in that system and that frees up more time for migrations to new platforms, such as Kubernetes.”\n\nIf you’re curious about what’s up next, [check out this detailed summary of the exciting new CI/CD features](/blog/a-look-ahead-for-gitlab-cicd/) on track to be released in 12.4 and beyond.\n\n## Missed GitLab Commit Brooklyn?\n\nIf you missed Marin's presentation on the prequel to Kubernetes, watch the entire video below and catch us in Europe at [GitLab Commit London on October 9](/events/commit/)!\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/lD-cYylwOLg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n[Cover Photo](https://unsplash.com/photos/rE3kbKmLmhE) by [Raphaël Biscaldi](https://unsplash.com/@les_photos_de_raph?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/journey?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[109,9],{"slug":2809,"featured":6,"template":680},"gitlab-journey-to-cicd","content:en-us:blog:gitlab-journey-to-cicd.yml","Gitlab Journey To Cicd","en-us/blog/gitlab-journey-to-cicd.yml","en-us/blog/gitlab-journey-to-cicd",{"_path":2815,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2816,"content":2822,"config":2827,"_id":2829,"_type":14,"title":2830,"_source":16,"_file":2831,"_stem":2832,"_extension":19},"/en-us/blog/gitlab-kubernetes-agent-on-gitlab-com",{"title":2817,"description":2818,"ogTitle":2817,"ogDescription":2818,"noIndex":6,"ogImage":2819,"ogUrl":2820,"ogSiteName":667,"ogType":668,"canonicalUrls":2820,"schema":2821},"A new era of Kubernetes integrations on GitLab.com","The GitLab Agent for Kubernetes enables secure deployments from GitLab SaaS to your Kubernetes cluster and provides deep integrations of your cluster to 
GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681920/Blog/Hero%20Images/kubernetes.png","https://about.gitlab.com/blog/gitlab-kubernetes-agent-on-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A new era of Kubernetes integrations on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-02-22\",\n      }",{"title":2817,"description":2818,"authors":2823,"heroImage":2819,"date":2824,"body":2825,"category":675,"tags":2826},[2531],"2021-02-22","\n\nThe GitLab Agent for Kubernetes (\"Agent\", for short) provides a secure connection between a GitLab instance and a Kubernetes cluster and allows pull-based deployments to receive alerts based on the network policies. We released the first version of the Agent back in September on self-managed GitLab instances. We are happy to announce that the Agent is available on GitLab SaaS, GitLab.com, and has many more features coming soon.\n\nIf you run into any issues with the Agent or would like to provide feedback, please, [contribute in the Agent epic](https://gitlab.com/groups/gitlab-org/-/epics/3329).\n{: .alert .alert-warning}\n\n## Why a new era?\n\nBefore, the recommended way to attach a cluster to GitLab was to provide the cluster certificates and to open up the Kube API to GitLab.com. To get the most out of the integrations, we recommended attaching the cluster with `cluster-admin` rights, so GitLab could provision new namespaces and create review apps. But many users found this to be overly risky and instead rolled out custom integrations that were often built around the GitLab Runner. We want to simplify and support security-minded users with the GitLab Agent for Kubernetes and provide them with a safe, reliable, and future-proof integration solution between GitLab and their clusters. 
The GitLab Agent provides a secure connection between the cluster and GitLab. Access rights can be controlled with the Agent more tightly by our users, and we consider it to be the basis for future Kubernetes integrations with GitLab.\n\nWhen Kubernetes was just starting to get popular, our initial approach served new Kubernetes users well. At the same time, providing `cluster-admin` rights is not an option for many current users with experienced Site Reliability Engineers (SREs) and Platform Engineers on board. In the past few years, thanks to the certificate-based integrations, we have learned a lot about the needs of GitLab users, and we are leveraging these learnings with the Agent.\n\n## How does the Agent work?\n\nThe Agent provides a permanent connection using websockets or gRPC between a Kubernetes cluster and a GitLab instance. Since we want to keep the cluster-side component minimal and lightweight, we imagine multiple Agents being installed into the same cluster with different access levels. Still, this integration is complex. To understand how the Agent works, let me first introduce its major components. The whole Agent experience is made possible primarily by two components that we call `agentk` and `kas` (short for GitLab Agent Server). `agentk` is the cluster-side component that has to be deployed in the cluster, while `kas` is the GitLab server-side component that is managed alongside GitLab. Since we want to keep the cluster-side component as slim as possible, `kas` is responsible for much of the heavy lifting.\n\nThe Agent is configured in code, then registered with GitLab through an access token. Once installed in the cluster, `agentk` receives the access token and the `kas` endpoint and authenticates itself with GitLab. Subsequently, it retrieves its own configuration from GitLab, and keeps a connection open between `kas` and the cluster. 
This way both the agent and GitLab can send messages and receive information from the other party through a secure connection. This approach also allows a Kubernetes cluster sitting behind a firewall to be securely integrated with GitLab.com.\n\n## Getting started\n\n### About the Agent's availability\n\nIf you would like to try out the Agent on GitLab.com, `kas` is already installed and is managed by our SRE team. Before making the Agent generally available, we want to make sure that Agent-based workflows won't harm the performance of GitLab.com. This is why, at this time, `kas` is only available for select customers and projects. If you would like to try it out, [reach out to me](/company/team/#nagyv-gitlab) in e-mail or by mentioning me in an issue with your project ID, and we will authorize your project.\n\nGitLab's `kas` instance is available at `wss://kas.gitlab.com`. You will have to provide this value together with a registered agent access token when you deploy `agentk` to your cluster. You can [follow the installation instructions from our documentation](https://docs.gitlab.com/ee/user/clusters/agent/#define-a-configuration-repository) starting with defining a configuration repository.\n\n### How deployments work\n\nIf you prefer a video walk-through, we demonstrate how pull-based deployments work with the Agent below.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/17O_ARVaRGo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nFor deployments, we share some codebase with ArgoCD since this part of the Agent is built on the [gitops-engine](https://github.com/argoproj/gitops-engine/). The `gitops-engine` provides a simple tool to keep git repositories synced with cluster resources. The Agent is configured in code. 
What we call the \"agent configuration project\" references the repositories containing the Kubernetes manifests which are the resource definitions describing the expected state of your cluster. Whenever these manifests change, the Agent automatically pulls the new configuration and applies it in the cluster.\n\n#### An example using Helm\n\nToday, the GitLab Agent for Kubernetes only supports pull-based deployments, but we are working on connecting it with GitLab CI to also provide push-based deployment support. So far, we have created a simple example repository that shows how someone might use the Agent together with Helm to install the GitLab Runner in their cluster.\n\nOne critique of Helm is that you might get different deployments without changing anything in the code you manage. We want to make sure that your manifest projects reflect what is expected to be deployed in your cluster. This is why we recommend that you use GitLab CI to generate and commit the final Kubernetes manifests from your preferred templating tool, and let the Agent take care of deploying the rendered templates. We follow this pattern in the example repository too.\n\n### Kubernetes network security alerts\n\nIn [GitLab 13.9](/releases/2021/02/22/gitlab-13-9-released/) we are [shipping an integration with Cilium built on top of the Agent](/releases/2021/02/22/gitlab-13-9-released/#configmap-support-for-kubernetes-agent-server). The integration provides a simple way to generate network policy-related alerts and to surface those alerts in GitLab. 
Watch the video below for a demo:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/mFpXUvcAT1g\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Ongoing developments\n\nWhile we think that the Agent can already bring great value to Silver and Gold-level GitLab users, we are working constantly to build even more features on top of it.\n\nOur primary focus now is to make the Agent generally available on GitLab.com SaaS. We are also working on a set of features that allows a user to connect GitLab CI with clusters securely using the Agent. This allows existing push-based deployments to start easily using the Agent and the integrations coming with it.\n\nWe are excited to see how you will benefit from the Agent and what amazing things you will build with it.\n\n## Read more on Kubernetes:\n\n- [How to install and use the GitLab Kubernetes Operator](/blog/gko-on-ocp/)\n\n- [Threat modeling the Kubernetes Agent: from MVC to continuous improvement](/blog/threat-modeling-kubernetes-agent/)\n\n- [How to deploy the Agent with limited permissions](/blog/setting-up-the-k-agent/)\n\n- [Understand Kubernetes terminology from namespaces to pods](/blog/kubernetes-terminology/)\n\n- [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n",[1091,1297,9],{"slug":2828,"featured":6,"template":680},"gitlab-kubernetes-agent-on-gitlab-com","content:en-us:blog:gitlab-kubernetes-agent-on-gitlab-com.yml","Gitlab Kubernetes Agent On Gitlab 
Com","en-us/blog/gitlab-kubernetes-agent-on-gitlab-com.yml","en-us/blog/gitlab-kubernetes-agent-on-gitlab-com",{"_path":2834,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2835,"content":2840,"config":2845,"_id":2847,"_type":14,"title":2848,"_source":16,"_file":2849,"_stem":2850,"_extension":19},"/en-us/blog/gitlab-live-event-recap",{"title":2836,"description":2837,"ogTitle":2836,"ogDescription":2837,"noIndex":6,"ogImage":945,"ogUrl":2838,"ogSiteName":667,"ogType":668,"canonicalUrls":2838,"schema":2839},"Here's what went down at #GitLabLive","We went live today to discuss our $100m Series D funding and what's next for GitLab – catch up on the recording here.","https://about.gitlab.com/blog/gitlab-live-event-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Here's what went down at #GitLabLive\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-09-20\",\n      }",{"title":2836,"description":2837,"authors":2841,"heroImage":945,"date":2842,"body":2843,"category":299,"tags":2844},[2353],"2018-09-20","\n\nAfter [yesterday's big news](/blog/announcing-100m-series-d-funding/), we held a\n[#GitLabLive](https://twitter.com/search?q=%23GitLabLive&src=tyah) event today to dive into what this means for us, for you, and for GitLab the product.\n\nAs you can tell, we were all pretty excited about it:\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">Waiting for \u003Ca href=\"https://twitter.com/hashtag/GitLabLive?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabLive\u003C/a> like \u003Ca href=\"https://t.co/eqw4ljZXaa\">pic.twitter.com/eqw4ljZXaa\u003C/a>\u003C/p>&mdash; Brendan O&#39;Leary 👨🏻‍💻 (@olearycrew) \u003Ca 
href=\"https://twitter.com/olearycrew/status/1042809056275193856?ref_src=twsrc%5Etfw\">September 20, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nHosted by Director of Cloud Native Alliances [Priyanka Sharma](https://twitter.com/pritianka), the event covered GitLab past,\npresent, and future with GitLab team-members, investors, and customers. You can watch the whole thing below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZgFqyXCsqPY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Get the slides\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vTO_mVE0psqDSIOwmrv30ebL0IMdAIhYFHqBcoqI6b8_Cl1yl8f6FaAIm-d7qwsOWhhiUIqPxo6fjhH/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nJust as an aside:\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">There’s a rumor going around that we borrowed the set of “Between Two Ferns” for \u003Ca href=\"https://twitter.com/hashtag/GitLabLive?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabLive\u003C/a>, can neither confirm nor deny 😆\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1042830634366853121?ref_src=twsrc%5Etfw\">September 20, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## Highlights\n\n### [Company update](https://youtu.be/ZgFqyXCsqPY?t=57s)\n\nOur CEO and co-founder [Sid Sijbrandij](/company/team/#sytses) chats with Priyanka about some major 
company\nmilestones, from our beginnings at YCombinator, to recently being ranked [#44 on Inc. 5000's\nlist of the fastest-growing companies in the US for 2018](/blog/gitlab-ranked-44-on-inc-5000-list/), and our [CI solution being recognized by Forrester as a Leader in that space](/blog/gitlab-leader-continuous-integration-forrester-wave/). He also talks about our acquisition of Gemnasium, which spurred development of GitLab's integrated security features, as well as our focus on cloud native and [Kubernetes](/solutions/kubernetes/).\n\nReaching 2,000 contributors recently was also a landmark achievement for us, and we're proud to have switched to a [DCO for source code contributions](/blog/gitlab-switches-to-dco-license/) to make it even easier for everyone to contribute. We're thrilled to have a number of [foundational open source projects call GitLab home](/blog/welcome-gnome-to-gitlab/) now, giving weight to [#movingtogitlab](/blog/why-move-to-gitlab/).\n\nAll of this has been building towards yesterday's news: our [series D funding of $100 million](/blog/announcing-100m-series-d-funding/) to help us realize our vision of beating out nine other, best-in-class products with a single application.\n\nHow does it feel being part of the unicorn club?\n\n> \"It's exciting. When we came to Silicon Valley, YCombinator explained to us that if you're going to raise money from external investors, your aim should be to become a billion dollar company. Otherwise you should not raise any money and we seriously considered it. But we opted to raise the money and now our early shareholders can feel confident that we've got here. But we raised more money now so the bar is higher and we're going to try to keep growing the company.\" - Sid Sijbrandij, CEO, GitLab\n\n### [Why invest in GitLab?](https://youtu.be/ZgFqyXCsqPY?t=14m20s)\n\nMatthew Jacobson, General Partner at ICONiQ Capital, joins to share some insight into why they've invested in GitLab. 
With a focus on growth-stage investments, they look for product velocity and the strength and quality of the team.\n\nThe conversation between ICONiQ and GitLab started over two years ago, where the \"maniacal focus on product\" at GitLab became clear and the breadth of our ambition made a real impression. Nine categories is an ambitious product vision!\n\n### [Scaling a remote work culture](https://youtu.be/ZgFqyXCsqPY?t=22m4s)\n\nOur culture is extremely important to us. Chief Culture Officer [Barbie Brewer](/company/team/#BarbieJBrewer) joins to shed some light on how we preserve it as we scale, keeping people front and center at all times:\n\n> \"We focus on working with the best people, getting the best contributors, and building the best product... We have our [values](https://handbook.gitlab.com/handbook/values/) at the core of everything we do. We give each other feedback and push each other to be better.\" - Barbie Brewer, Chief Culture Officer, GitLab\n\nThe company growing doesn't necessarily mean the workforce needs to grow 1:1: \"We're not just growing fast, we're growing smart,\" said Barbie.\n\nBarbie also reiterated our commitment to [diversity, inclusion and belonging](https://handbook.gitlab.com/handbook/values/#diversity-inclusion), sharing some of the ways we encourage and empower GitLab team-members to uphold these values and help each other to learn and grow along the way. As always, our [handbook](/handbook/) is our single source of truth.\n\n### [Product update](https://youtu.be/ZgFqyXCsqPY?t=33m58s)\n\nTo fill us in on what's new with GitLab the product, we're joined by [William Chia](/company/team/#thewilliamchia), Manager, Product Marketing. We delivered the full software development lifecycle at the end of 2016, then set our sights on [Concurrent DevOps](/blog/from-dev-to-devops/). 
William shares how conversations with users and customers alerted us to the \"toolchain crisis\" and how this has inspired us to deliver Concurrent DevOps with a single application covering the entire DevOps lifecycle.\n\n#### [User perspective: Why GitLab?](https://youtu.be/ZgFqyXCsqPY?t=51m26s)\n\nWe hear from Michael Sobota, Director of Product Integration at Charter Communications, about their company goals of quick, iterative development, shifting operations concerns left, and how they're using GitLab as their DevOps platform to get there. They've gone from feedback cycles of **two weeks** to a matter of **minutes** – ultimately helping them to deliver a better customer experience.\n\n> \"Gone are the days of managing multiple build machines. It's all in the power of developers.\" - Michael Sobota, Director of Product Integration, Charter Communications\n\n### [Product vision](https://youtu.be/ZgFqyXCsqPY?t=1h2m50s)\n\nHead of Product [Mark Pundsack](/company/team/#MarkPundsack) joins to share our ambitious product vision, and we're so excited about it, we're dedicating a post to it on its own! In case you just can't wait, here's the rundown:\n\n1. GitLab is a complete [DevOps platform](/solutions/devops-platform/), delivered as a single application, enabling [Concurrent DevOps](/topics/concurrent-devops/).\n1. We're going to double down on what's working and focus on depth, breadth, and adding new roles to the product.\n1. In 2019, we plan to become leaders in four new areas: project management, continuous delivery and release automation, application security testing, and value stream management.\n1. We have 26 new product capabilities planned for 2019.\n1. DevOps isn't just about developers and operations. We plan to cover roles like designers and product managers so everyone can work concurrently in a single product.\n\n### [Q&A](https://youtu.be/ZgFqyXCsqPY?t=1h13m51s)\n\nSid's back in the house! 
He answers some audience questions, and encourages everyone to make suggestions for how to improve GitLab:\n\"Many times the hardest thing is figuring out what to make, not how to make it.\" More proof that [everyone can contribute](/company/strategy/#why)!\n",[277,675,9,675,873],{"slug":2846,"featured":6,"template":680},"gitlab-live-event-recap","content:en-us:blog:gitlab-live-event-recap.yml","Gitlab Live Event Recap","en-us/blog/gitlab-live-event-recap.yml","en-us/blog/gitlab-live-event-recap",{"_path":2852,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2853,"content":2859,"config":2864,"_id":2866,"_type":14,"title":2867,"_source":16,"_file":2868,"_stem":2869,"_extension":19},"/en-us/blog/gitlab-markdown-tutorial",{"title":2854,"description":2855,"ogTitle":2854,"ogDescription":2855,"noIndex":6,"ogImage":2856,"ogUrl":2857,"ogSiteName":667,"ogType":668,"canonicalUrls":2857,"schema":2858},"A 5-minute Markdown tutorial","New to GitLab? New to Markdown? Here's a quick explainer on using Markdown to format text all over GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671172/Blog/Hero%20Images/markdown-tutorial-cover.png","https://about.gitlab.com/blog/gitlab-markdown-tutorial","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A 5-minute Markdown tutorial\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-08-17\",\n      }",{"title":2854,"description":2855,"authors":2860,"heroImage":2856,"date":2861,"body":2862,"category":743,"tags":2863},[2353],"2018-08-17","\n\nAt GitLab, we love [Markdown](https://docs.gitlab.com/ee/user/markdown.html) for providing a simple, clean way to add styling and formatting to plain text, that's visible and repeatable across multiple applications. 
This means you can copy and paste the text without losing the formatting, and it makes [reviewing diffs](https://docs.gitlab.com/ee/development/merge_request_concepts/diffs/) easier, as you're still reviewing plain text with no hidden data.\n\n## What is Markdown?\n\nMarkdown is a lightweight markup language created by John Gruber in 2004. Markdown lets you add formatting elements to plaintext text documents. Since its creation, markdown has become one of the world’s most popular markup languages. There are many web-based applications specifically built for writing in Markdown. Markdown syntax is designed to be readable and simple.\n\n## Markdown tutorial\n\nGitLab Product Marketing Manager [William Chia](/company/team/#thewilliamchia) recorded this five-minute Markdown tutorial for another GitLab team-member, so you can see how Markdown works within GitLab:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ix416lAYRSg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab Flavored Markdown\n\nGitLab uses [GitLab Flavored Markdown](https://gitlab.com/help/user/markdown#gitlab-flavored-markdown-gfm) (GFM) for other handy functionality not supported by standard Markdown. Here are a few useful things you can do with GFM:\n\n### Reference issues, commits, merge requests, or team members\n\nWhen you type `#12` (or any number) in an issue, it will automatically create a link to the corresponding issue in that project. 
You can also [easily reference other GitLab-specific items](https://gitlab.com/help/user/markdown#special-gitlab-references).\n\n### Autolink URLs\n\n You don't have to use the standard `[]()` format to create a link: just pasting the URL will [autolink it](https://gitlab.com/help/user/markdown#url-auto-linking).\n\n### Create diagrams and flowcharts\n\nIn [GitLab 10.3](/releases/2017/12/22/gitlab-10-3-released/#flow-charts-sequence-diagrams-and-gantt-diagrams-in-gitlab-flavored-markdown-gfm-with-mermaid) we added the ability to [generate diagrams and flowcharts](https://gitlab.com/help/user/markdown#mermaid) using [mermaid](https://mermaidjs.github.io/).\n\n### Quick actions\n\nOpen or close issues, reassign merge requests, add todos, unsubscribe from issues – these are just a few things you can do with GFM [quick actions](https://docs.gitlab.com/ee/user/project/quick_actions.html), all without leaving your keyboard. Just type `/` and a list of options will appear.\n\nThese are just a few examples of GFM – see the [Markdown documentation](https://docs.gitlab.com/ee/user/markdown.html) for a full list. We're adding to it all the time: as of our last release you can quickly [make an issue confidential](/releases/2018/07/22/gitlab-11-1-released/#confidential-issue-quick-action) right from the issue comment field. This was a community contribution, and we invite you to [contribute](/community/contribute/) functionality and quick actions you'd find useful too!\n\n## Benefits of using Markdown\n\nSome may be skeptical of using Markdown when there are other options – like a WYSIWYG editor. But the benefits of using markdown are hard to ignore:\n\n* Markdown is crazy versatile. It can be used for everything including (but not limited to) websites, notes, presentations, emails, and documents of all kinds.\n* Markdown isn’t picky about its operating system. 
You can create Markdown-formatted text on any device running any operating system.\n* Markdown can be used on the move, so to speak. Markdown-formatted text can be opened using virtually any application. You can also import your Markdown files into another Markdown application if you decide to make a change.\n* The Markdown text you create won’t become obsolete. Even if the application you’re using stops working down the line, you’ll still be able to read your Markdown-formatted text using a text editing application.\nThe fact that it is the backbone of so much web content means that you might be the odd one out if you DON’T use it.\n\n## How to get started with Markdown\n\nThere are a few ways you can learn about how to get started with Markdown.\n\nThe first is to check out online tutorials. You can find a number of resources on Markdown, including the [original guide by John Gruber](https://daringfireball.net/projects/markdown/) and a [Markdown Tutorial](https://www.markdowntutorial.com/) open-source website that you can use to try out Markdown in your web browser.\n\nOr, just try it out with the Notepad application on a device. Since Markdown is just plain text, you can write it in any text editor, such as Notepad. Save a file with the .MD file extension to make a proper Markdown file.\n\nThe second (and a highly encouraged) way to get the hang of Markdown is to check out some [free online Markdown editors](https://www.makeuseof.com/tag/online-markdown-editors/) to test the waters - many of which are great for just learning how to write in Markdown. 
Markdown editors like StackEdit and Dillinger can help your efforts to get started with Markdown.\n\nFor the most optimal Markdown experience, a writing app that's built for Markdown is typically the best way to go.\n",[993,9],{"slug":2865,"featured":6,"template":680},"gitlab-markdown-tutorial","content:en-us:blog:gitlab-markdown-tutorial.yml","Gitlab Markdown Tutorial","en-us/blog/gitlab-markdown-tutorial.yml","en-us/blog/gitlab-markdown-tutorial",{"_path":2871,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2872,"content":2877,"config":2882,"_id":2884,"_type":14,"title":2885,"_source":16,"_file":2886,"_stem":2887,"_extension":19},"/en-us/blog/gitlab-mental-health-awareness-week-recap",{"title":2873,"description":2874,"ogTitle":2873,"ogDescription":2874,"noIndex":6,"ogImage":690,"ogUrl":2875,"ogSiteName":667,"ogType":668,"canonicalUrls":2875,"schema":2876},"GitLab Mental Health Awareness Week Recap","A recap of the Learning and Development Mental Health Awareness week","https://about.gitlab.com/blog/gitlab-mental-health-awareness-week-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Mental Health Awareness Week Recap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Samantha Lee\"}],\n        \"datePublished\": \"2020-12-21\",\n      }",{"title":2873,"description":2874,"authors":2878,"heroImage":690,"date":2879,"body":2880,"category":698,"tags":2881},[1594],"2020-12-21","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\nAs an [all-remote](https://about.gitlab.com/company/culture/all-remote/guide/#why-remote) company, the GitLab team is distributed across the globe. Our team is used to working online, scheduling Zoom social hours, and using asynchronous communication strategies.\n\nEven with all these work from home skills in our back pockets, 2020 has been a challenge. 
Over the year, we've adapted to new work environments, taken on new roles within our families and communities, and found new and creative ways to connect from a distance. It's been chaotic and it has taken a toll on our mental health.\n\nOver the last few months, the [Learning and Development (L&D) team at GitLab](https://about.gitlab.com/handbook/people-group/learning-and-development/\n) heard team members express feelings of burnout. Personally, in [coffee chats](https://about.gitlab.com/company/culture/all-remote/informal-communication/#coffee-chats) and slack conversations, I heard team members speak of feeling exhausted and overwhelmed. The combination of maintaining a regular work schedule, caring for family, and finding time to relax and recharge, all while living through a global pandemic, is taking its toll.\n\nI could relate. I was feeling this overwhelm, too.\n\nIn response to these conversations, the L&D team launched an asynchronous [internal learning campaign](https://about.gitlab.com/handbook/people-group/learning-and-development/learning-initiatives/#internal-learning-campaigns) for the GitLab team with the goal of increasing awareness of, and access to, existing [mental health](https://about.gitlab.com/company/culture/all-remote/mental-health/\n) resources at GitLab. 
This was a new [learning initiative](https://about.gitlab.com/handbook/people-group/learning-and-development/learning-initiatives/#learning-initiatives-introduction) for the team, leveraging GitLab issues, Slack reminders, polls, and a [learning speaker series](https://about.gitlab.com/handbook/people-group/learning-and-development/learning-initiatives/#learning-speaker-series-overview) to engage and educate team members.\n\nTake a few minutes to read the rest of this post to learn about the intentions behind the initiative, major takeaways, and what we're doing moving forward to continue the conversation.\n\n## Why participate asynchronously?\n\n[Asynchronous communication](https://about.gitlab.com/company/culture/all-remote/asynchronous/) gives team members the opportunity to work [efficiently](https://handbook.gitlab.com/handbook/values/#efficiency), [collaborate](https://handbook.gitlab.com/handbook/values/#collaboration), and put [friends and family first](https://handbook.gitlab.com/handbook/values/#family-and-friends-first-work-second). \n\nWhen it comes to engaging learning content, applying asynchronous strategies can be challenging. Many learners are used to learning in collaborative, co-located groups or calls. The GitLab L&D team is always exploring and experimenting with new ways to make asynchronous learning just as engaging as synchronous learning. With this campaign, we used [GitLab issues](https://gitlab.com/gitlab-com/people-group/learning-development/challenges/-/boards), Slack, the [Polly app](https://www.polly.ai/), and Zoom to deliver information and host discussion.\n\nThis awareness campaign needed to be designed with as many asynchronous elements as possible to\n\n1. Make content accessible and consumable for all team members, regardless of their time zone or location\n1. Avoid creating additional overwhelm for participants related to attending synchronous calls, and instead let team members review content on their own time\n1. 
Document content for future self-paced learning paths\n\nIn addition to making this awareness campaign asynchronous, all participation was optional. Discussing mental health and [burnout](https://about.gitlab.com/blog/preventing-burnout/) can be challenging and uncomfortable. We wanted to allow space to discuss burnout only when team members felt comfortable and ready.\n\n\n## So, how'd it go!\n\nA few great wins from the week:\n\nFirst, we collaboratively stood up our [mental health tool stack](https://about.gitlab.com/company/culture/all-remote/mental-health/#mental-health-tool-stack) as part of our [day 2 issue](https://gitlab.com/gitlab-com/people-group/learning-development/challenges/-/issues/35). Team members were asked to open an MR and contribute tools they use to manage burnout. Together we collected 12 resources. If you have one to add, please [contribute!](https://about.gitlab.com/community/contribute/)\n\nSecond, we created, tested, and documented a new learning initiative, [internal learning campaigns!](https://about.gitlab.com/handbook/people-group/learning-and-development/learning-initiatives/#internal-learning-campaigns). The L&D team is exploring new ways to deliver bite-sized learning, and this is one we will try again in the future.\n\nAnd finally, we hosted a fantastic [live speaker series with John Fitch](https://www.youtube.com/watch?v=BDvpoouM-us&feature=emb_logo), author of the book [Time Off](https://www.timeoffbook.com/). Team members asked questions about how to take meaningful time off, how to return from PTO, and how managers can encourage team members to take time off. 
Approximately 100 team members attended synchronously, and many watched the recorded replay.\n\nA little more about John - he's the co-author of the international bestseller [Time Off: A Practical Guide to Building Your Rest Ethic and Finding Success Without the Stress](https://www.timeoffbook.com/), a book that expands our value of time off, and how our rest and leisure are as important as our work. John is a recovering workaholic who wrote this book for a former version of himself. He cares deeply about the future of work and is optimistic that everyone has the opportunity to join the creative class in the near future. John is now building tools for helping people and teams design their rest ethic and manage their time off more effectively. He would love to hear from you if you are passionate about intentional time off.\n\nWatch the replay of our live speaker series below!\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/BDvpoouM-us\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n\n## What didn't go so well?\n\nOne of the goals for the week was to increase the number of team members who answered 'Yes' to the following question: 'I know where and how to access resources to manage my mental health at GitLab'. 
Based on the Polly polls we shared in our #what's-happening-at-GitLab Slack channel, we saw a 3% increase in the number of team members who answered 'Yes', increasing from 73% to 76%.\n\nHere are some screenshots of the poll data:\n\nOur initial poll data, collecting information _before_ the awareness week:\n\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/pre-poll-results.jpg){: .shadow}\n\n\nAnd our final poll data, collecting information _after_ the awareness week:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/post-poll-results.jpg){: .shadow}\n\n\nWhile this shows a slight increase, it's not enough, and the L&D team recognizes we need to do more as a company to communicate this information more widely and empower team members to use the available resources. A few issues we noticed with this data collection:\n\n1. We aren't sure if the people who took the first poll also took the second poll since the poll is anonymous\n1. We also aren't sure if the people who took either poll participated in any or all of the awareness week content. Since participation was completely optional, we didn't track who decided to get involved\n1. Fewer people responded to the final poll than the initial poll\n\n\n## Now what?\n\nThe role of Learning & Development at GitLab has evolved during Covid-19 to include more support for mental health and wellbeing of our team members. Looking after team members' wellness is no longer a passing priority. The increasing pace of monumental change and stress indicates otherwise. The pandemic is a marathon, not a sprint, and our role as learning leaders is equipping our team members with a set of tools to build resilience, manage through change, and take care of their mental health.\n\nThis internal awareness campaign was just the start of a series of learning opportunities the L&D team is creating for team members to explore their mental health and learn strategies for managing burnout. 
We're working on [new mental health and burnout management initiatives](https://gitlab.com/groups/gitlab-com/people-group/learning-development/-/epics/24) for 2021 to continue this conversation beyond this awareness campaign.\n\nWe're also working on creating a self-paced learning path through this awareness campaign content, so that team members who missed the content, future team members, and our wider community can review the material. Follow the updates from our new GitLab Learn platform to find out when this learning path will be available.\n\nIn the meantime, we encourage you to check out the content from the week shared via GitLab issues!\n\n\n| Issue Link | Content |\n|",[811,9,832],{"slug":2883,"featured":6,"template":680},"gitlab-mental-health-awareness-week-recap","content:en-us:blog:gitlab-mental-health-awareness-week-recap.yml","Gitlab Mental Health Awareness Week Recap","en-us/blog/gitlab-mental-health-awareness-week-recap.yml","en-us/blog/gitlab-mental-health-awareness-week-recap",{"_path":2889,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2890,"content":2896,"config":2902,"_id":2904,"_type":14,"title":2905,"_source":16,"_file":2906,"_stem":2907,"_extension":19},"/en-us/blog/gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant",{"title":2891,"description":2892,"ogTitle":2891,"ogDescription":2892,"noIndex":6,"ogImage":2893,"ogUrl":2894,"ogSiteName":667,"ogType":668,"canonicalUrls":2894,"schema":2895},"GitLab named a Visionary in 2020 Gartner Magic Quadrant for Enterprise Agile Planning Tools","For the second consecutive year, Gartner validates our product vision.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named a Visionary in 2020 
Gartner Magic Quadrant for Enterprise Agile Planning Tools\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cormac Foster\"}],\n        \"datePublished\": \"2020-08-03\",\n      }",{"title":2891,"description":2892,"authors":2897,"heroImage":2893,"date":2899,"body":2900,"category":675,"tags":2901},[2898],"Cormac Foster","2020-08-03","\nGitLab was recently named a 'Visionary' by Gartner in their 2020 Magic Quadrant for Enterprise Agile Planning Tools. We're pleased to be recognized once again, despite being a fairly new entrant into the space.\n\nAs we [build toward lovability](/direction/maturity/) over the next year, we're excited to be recognized by industry experts like Gartner. While we continue to increase our breadth and depth, we also plan to double down on the unique benefits our single-application approach provides.\n\nIn the video below, [Justin Farris](/company/team/#justinfarris), group manager of Plan, lays out the team's long-term vision, including our five [jobs to be done](/direction/dev/#plan-1):\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube-nocookie.com/embed/bT60rJEoWhw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nWe recently released a suite of enhanced work planning and management features in [GitLab 13.2](/releases/2020/07/22/gitlab-13-2-released/), with a lot more to come. Over the next twelve months, we plan to focus on three core areas:\n\n### Building a world class Agile planning experience\n\n> \"Agile is the dominant means of creating software today because it enables organizations\n> to respond to change quickly, to learn rapidly, and to deliver continuously. Making use of\n> agile practices at scale is essential to digital business success.\" Gartner, Magic Quadrant\n> for Enterprise Agile Planning Tools\n\nWe agree. In its many forms, [Agile](/solutions/agile-delivery/) is the way forward for modern business. 
We can't be everything to everyone, and we don't want to recreate the same bloated project management solutions enterprises have been using for years. But we **do** want to be the best solution for managing Agile projects and portfolios that you can use to take your business forward.\n\nTo that end, we're focused on delivering solutions that help you elevate your Agile planning from project management to portfolio planning, regardless of industry or tool choice. We recently released a [requirements management](https://docs.gitlab.com/ee/user/project/requirements/) feature, which will open opportunities to use GitLab for entirely new businesses, and we launched a vastly improved [Jira import process](https://docs.gitlab.com/ee/user/project/import/jira.html) to make it easier to transition to a GitLab workflow. While we continue to iterate on both of those, we'll also be improving the overall management experience with easier-to-use Kanban boards, [enhanced portfolio and group roadmaps](https://docs.gitlab.com/ee/user/group/roadmap/), and more robust epics and milestones.\n\n### Visibility and value stream management\n\nOf course, you can't plan without data, so visibility is another key driver of our roadmap. [Value stream management](/solutions/value-stream-management/) is a hot topic these days. To many, it's a refocusing of decades-old [value stream mapping](https://en.wikipedia.org/wiki/Value-stream_mapping) techniques to the software development lifecycle, measuring value added throughout the software development process and identifying inefficiencies that might keep you from delivering more.\n\nMeasurement is an essential part of the process, but at GitLab, we can also help you close the loop and take action – the \"management\" of that value stream. 
As a single application for the [DevOps lifecycle](/topics/devops/), GitLab has the unique ability to help you discover process bottlenecks, drill into the sources of waste for the root cause, and actually make changes to address them, whether that's reassigning an MR, mentioning someone to unblock the issue, or committing code changes.\n\nIn recent releases, we unlocked more flexible value stream workflows with [customizable value stream analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html#customizable-value-stream-analytics) and surfaced value metrics to more personas with [compliance](https://docs.gitlab.com/ee/user/compliance/compliance_report/index.html) and [security dashboards](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#instance-security-dashboard). In the coming months, we'll continue to enhance our drill-down reporting and resolution, focusing on additional value metrics, additional dashboards, and automated recommendations for action.\n\n### Our customers\n\nOf course, as happy as we are to be recognized by Gartner, our users are the most important source of product guidance. At GitLab, everyone can contribute, and we wouldn't be the same company without the active participation of our users. That's why we've made our [maturity plan](/direction/maturity/) and [product vision](/direction/#vision) public and open for comment. 
For more information about enterprise Agile Planning in the coming year, please read our [FY21 Plan](/direction/dev/#fy21-plan-whats-next-for-dev)—and let us know what you think!\n\n### Related links\n\n* [2020 Magic Quadrant for Enterprise Agile Planning Tools (available to Gartner subscribers)](https://www.gartner.com/document/3983813?ref=solrAll&refval=255086013)\n* [We're dogfooding a tool to help visualize high-level trends in GitLab projects](/blog/insights/)\n* [How Marketing uses GitLab to manage complex projects](/blog/gl-for-pm-prt-2/)\n\n_Gartner \"Magic Quadrant for Enterprise Agile Planning Tools,\" Keith Mann, et al, 21 April 2020\nGartner does not endorse any vendor, product or service depicted in its research publications and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s Research & Advisory organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose._\n",[831,723,9],{"slug":2903,"featured":6,"template":680},"gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant","content:en-us:blog:gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant.yml","Gitlab Named Visionary In Gartner Agile Planning Magic Quadrant","en-us/blog/gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant.yml","en-us/blog/gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant",{"_path":2909,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2910,"content":2916,"config":2921,"_id":2923,"_type":14,"title":2924,"_source":16,"_file":2925,"_stem":2926,"_extension":19},"/en-us/blog/gitlab-names-joel-krooswyk-as-its-first-federal-cto",{"title":2911,"description":2912,"ogTitle":2911,"ogDescription":2912,"noIndex":6,"ogImage":2913,"ogUrl":2914,"ogSiteName":667,"ogType":668,"canonicalUrls":2914,"schema":2915},"GitLab names Joel Krooswyk as its first Federal CTO","New role reaffirms company’s commitment to the public sector.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669378/Blog/Hero%20Images/bab_cover_image.jpg","https://about.gitlab.com/blog/gitlab-names-joel-krooswyk-as-its-first-federal-cto","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab names Joel Krooswyk as its first Federal CTO\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-11-14\",\n      }",{"title":2911,"description":2912,"authors":2917,"heroImage":2913,"date":2918,"body":2919,"category":675,"tags":2920},[950],"2022-11-14","[Gitlab Federal](/solutions/public-sector/), LLC, provider of The One DevOps Platform for the public sector, announced that [Joel Krooswyk](https://gitlab.com/jkrooswyk), former Senior Manager of 
Solutions Architecture, has been named Federal CTO.\n\n![Photo of Joel Krooswyk](https://about.gitlab.com/images/blogimages/krooswyk.jpg){: .shadow.small.left.wrap-text}\n\n“The creation of the Federal CTO position recognizes the importance of the public sector in the world of DevSecOps. Joel’s experience allows him to provide expert insight to government agencies as they seek guidance on DevOps practices, building software factories, meeting compliance requirements and more,” says [Bob Stevens](https://gitlab.com/bstevens1), Vice President of Public Sector at GitLab. “We are excited to reaffirm our commitment to the public sector through this new role and Joel’s appointment.”\n\nAs Federal CTO, Krooswyk will ensure that GitLab has a voice in developing key [DevSecOps](/topics/devsecops/) practices coming from standards bodies, Congressional committees, industry working groups, and other influential organizations. He also will assist GitLab in continuing to build and strengthen relationships with federal DevSecOps professionals to help them streamline and secure their software development environments with a DevSecOps platform.\n\n“This is an exciting time in DevSecOps, and the federal government is on the leading edge, helping navigate such challenging issues as software supply chain security and regulatory compliance. I am thrilled to step into this new role and to be GitLab’s voice at the table, ensuring that our software development and security technology and practices are reflected in efforts across the public sector,” Krooswyk says.\n\nKrooswyk has actively been involved in GitLab’s growth since 2017. He has 25 years of experience in the software industry. His experience spans development, QA, product management, portfolio planning, and technical sales, and he has written a half million lines of unique code throughout his career. Joel holds a B.S. 
in Electrical Engineering from Purdue University as well as multiple industry certifications.",[675,1440,9,184],{"slug":2922,"featured":6,"template":680},"gitlab-names-joel-krooswyk-as-its-first-federal-cto","content:en-us:blog:gitlab-names-joel-krooswyk-as-its-first-federal-cto.yml","Gitlab Names Joel Krooswyk As Its First Federal Cto","en-us/blog/gitlab-names-joel-krooswyk-as-its-first-federal-cto.yml","en-us/blog/gitlab-names-joel-krooswyk-as-its-first-federal-cto",{"_path":2928,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2929,"content":2935,"config":2941,"_id":2943,"_type":14,"title":2944,"_source":16,"_file":2945,"_stem":2946,"_extension":19},"/en-us/blog/gitlab-news",{"title":2930,"description":2931,"ogTitle":2930,"ogDescription":2931,"noIndex":6,"ogImage":2932,"ogUrl":2933,"ogSiteName":667,"ogType":668,"canonicalUrls":2933,"schema":2934},"An announcement from GitLab CEO Sid Sijbrandij","Earlier today, GitLab CEO Sid Sijbrandij sent the following note to GitLab team members.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099630/Blog/Hero%20Images/Blog/Hero%20Images/logoforblogpost_logoforblogpost.jpg_1750099629774.jpg","https://about.gitlab.com/blog/gitlab-news","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An announcement from GitLab CEO Sid Sijbrandij\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2023-02-09\",\n      }",{"title":2930,"description":2931,"authors":2936,"heroImage":2932,"date":2937,"body":2938,"category":2939,"tags":2940},[950],"2023-02-09","**Earlier today, GitLab CEO Sid Sijbrandij sent the following note to GitLab team members.**\n\nDear GitLab team members,\n\nI have made the decision to reduce the size of our team by 7%. \n\nThis was a very difficult decision, and I understand this may be unexpected to some of you. 
I’d like to give some context about how we arrived at this outcome. \n\nThe current macroeconomic environment is tough, and as a result, companies are still spending but they are taking a more conservative approach to software investments and are taking more time to make purchasing decisions.\n\nI had hoped reprioritizing our spending would be enough to withstand the growing global economic downturn. Unfortunately, we need to take further steps and match our pace of spending with our commitment to responsible growth.\n\nWe are sad to say goodbye to talented team members who have played an integral part in GitLab's journey to date, and I am thankful for their significant contributions. I am sorry to see them leave the company because of this decision.\n\nEveryone leaving has received a meeting invitation from a manager who will provide additional information.  We are committed to helping you through this challenging time in the following ways:\n\n**Pay through a transition period:** Continued payment to team members who are leaving through the transition period, which may vary by region.\n\n**Severance:** A single payout equal to four months base salary, and payments will be made according to local processes and timing requirements.\n\n**Equity:** We’re accelerating vesting through 2023-03-15 and removing the vesting cliff for team members who have been granted equity and have been with us for under six months.\n\n**Healthcare:** Based on location and current benefit options previously selected by team members, healthcare premiums will be covered for up to six months, where possible.  
Modern Health for mental health support will continue for all team members for six months.\n\n**Hardware:** Team members can keep their hardware and home office equipment subject to our security protocols.\n\n**Career support:** We will provide outplacement services with a third-party vendor, including coaching, resume building and guidance, and job-seeking support.\n\nWe know this can be an unsettling experience for team members who are staying. It can be hard to see valued team members leave, and we will host a series of Ask Me Anythings (AMAs) to answer your questions.\n\nSid\n","News",[675,9],{"slug":2942,"featured":6,"template":680},"gitlab-news","content:en-us:blog:gitlab-news.yml","Gitlab News","en-us/blog/gitlab-news.yml","en-us/blog/gitlab-news",{"_path":2948,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2949,"content":2955,"config":2961,"_id":2963,"_type":14,"title":2964,"_source":16,"_file":2965,"_stem":2966,"_extension":19},"/en-us/blog/gitlab-product-navigation",{"title":2950,"description":2951,"ogTitle":2950,"ogDescription":2951,"noIndex":6,"ogImage":2952,"ogUrl":2953,"ogSiteName":667,"ogType":668,"canonicalUrls":2953,"schema":2954},"Inside the vision for GitLab’s new platform navigation","A peek into what inspired our new navigation design, which is coming soon.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668078/Blog/Hero%20Images/cover-image-helm-registry.jpg","https://about.gitlab.com/blog/gitlab-product-navigation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the vision for GitLab’s new platform navigation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christen Dybenko\"}],\n        \"datePublished\": \"2023-05-01\",\n      }",{"title":2950,"description":2951,"authors":2956,"heroImage":2952,"date":2958,"body":2959,"category":1340,"tags":2960},[2957],"Christen Dybenko","2023-05-01","\n\nSoon, we’ll be launching an 
entirely redesigned navigation in the GitLab product that is based on feedback from users. We’re both excited and a little nervous because navigation is so critical to every user’s workflow. That’s why we made a thoughtful shift in our iteration strategy, taking extra time and intention to develop a new and refined vision. We'd like to share a peek into how we ended up where we did and why we are so excited for our new design!\n\n## We had to invest in the right user experience\n\nBecause it has such an obvious impact on user experience, a navigation overhaul is no small feat. That’s why we fully funded a team to work exclusively on navigation, and provided the time and space to create the best experience possible. During the past year, we put a big focus on design ideation and UX research. It was a lot of work, but we believe this level of user focus has really paid off. \n\nBacked by our amazing design and product leadership team, we put much of our focus on the new navigation for more than nine months while we designed and tested it with end users.\n\nIn this blog post, we’ll share insights on our process, what we learned, and our vision for the future.\n\n![New navigation](https://about.gitlab.com/images/blogimages/2023-04-20-new-navigation-vision/new-navigation-vision.png){: .shadow}\n\n## Predicting what users will need\n\nWhen we first started to think about how to redesign our navigation, the challenge seemed overwhelming. How do we know how to make the best decisions for our navigation? How can anyone know which design or solution is *right*?\n\nWe did not want to make users unhappy for even a short period of time. At GitLab, we have [15 user personas](/handbook/product/personas/#user-personas), incredibly savvy users, and so many different workflows. We had to consider opinions that were not present in our backlog. 
For example, our power users can be very verbose in issues, but new users are not.\n\nIt is a huge undertaking to get to this kind of understanding and know what is right. Time pressure and needing to ship quickly could have made this type of work impossible at this scale.\n\nThankfully, our team dedicated to navigation was amazing. They invested time to reveal our users' key pain points with navigation, which set the litmus test by which we could evaluate every mockup and solution.\n\n## Establishing a north star\n\nBefore we wrote a line of code or started planning, we did a crucial piece of alignment to know our goals. Our design team led us in a north star exercise where we examined every piece of [System Usability Score (SUS)](/handbook/product/ux/performance-indicators/system-usability-scale/) feedback we had received on navigation.\n\nWe coded this feedback and [three themes](/direction/manage/foundations/navigation_settings/#1-year-plan) emerged. We needed to: \n\n- minimize feelings of being overwhelmed\n- orient users across the platform\n- allow users to pick up where they left off easily\n\nThis north star was amazing for understanding the problem and how to proceed. We learned _a lot_ about what our users’ pain points are and what our users struggle with daily.\n\nThankfully, this also helped us remove the dread of trying to ship something with the impossible goal of being all things to all people as we could now test these three themes with any persona.\n\nWe applied the themes to every design validation effort that we conducted with users moving forward. Our UX Research team also conducted interviews to understand how users felt about these specific themes. It felt incredible to have these insights available right from the start. 
It was also empowering to let some of the noise go to focus more clearly on what matters and what would move us forward.\n\n## Shifting our perspective on iteration for the right user experience\n\nGitLab is amazing at [iteration](/handbook/engineering/workflow/iteration/), and lately, we’ve been raising the bar on the quality of our [MVCs](/handbook/product/product-principles/#the-minimal-viable-change-mvc) and [definition of done](https://docs.gitlab.com/ee/development/contributing/merge_request_workflow.html?#ui-changes) with the goal of not degrading the current user experience. For navigation, we took this extra seriously, with the intention of protecting every part of the navigation experience.\n\nAs we reviewed the history of many iterative navigation updates over the past five years, we could see that there was very little overall consistency in the code and in the intention of the updates. This is what happens at fast-moving startups, and it can be ok for a period of time, but at some point, it's necessary to take a pause to strip things back for a meaningful change. The small iterations over time gave us an indication of pain points overall, and we needed a thoughtful plan to proceed. \n\nWe decided that anything we change in this new navigation should not degrade a user’s core workflow. We would first hit a baseline for what currently exists in navigation and then make meaningful updates. We agreed that anything we ship after our Alpha had to be fully usable by our own team. 
We didn’t want users to feel like we’d moved backward or that they had lost functionality in this next phase.\n\nSo, while we have some exciting features planned for the future, we won’t take action on them until we fully refine the core features and address user feedback.\n\n## Iterations now and vision for the next year \n\nWhile holding the baseline promise of no degradation in the new navigation, we did find opportunities to ship small iterations to our current navigation since January. First, we shipped a new navigation called “Your Work” and second, we shipped a new “Explore” menu to all users. Those menus are central to our new navigation vision, but they improved the legacy navigation, too.\n\nAfter launch, we can’t wait to improve even further with more customizable navigation experiences like allowing pins on Your Work and seamless integration with search, command line, and keyboard use. We also have ideas on how to add better landing pages that make life more custom in GitLab, and we couldn’t do that without this new navigation.\n\n## No one likes a navigation re-design\n\nAll that said, we know that no one actually likes a navigation redesign, even if it is best in the long run. Core workflows are ingrained muscle memory that no one wants to mess with if possible.\n\nThat’s why we are releasing our new navigation with a built-in on/off switch. With this approach, you can gradually move to the new navigation by switching back and forth for a little while, as needed. \n\nOur hope is that you’ll take a similar approach and share your feedback along the way, too. We want to hear about your experiences, so please be honest and your feedback will help us iterate.\n\n## What to expect for rollout\n\nWe are proud of our vision for a new navigation! Over the next few months, our new navigation will be available via an opt-in process in the user profile menu, and we'd love your feedback. 
Watch our Twitter, upcoming release posts, and our [direction page](/direction/manage/foundations/navigation_settings/) for more information!\n",[1342,1698,677,9],{"slug":2962,"featured":6,"template":680},"gitlab-product-navigation","content:en-us:blog:gitlab-product-navigation.yml","Gitlab Product Navigation","en-us/blog/gitlab-product-navigation.yml","en-us/blog/gitlab-product-navigation",{"_path":2968,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2969,"content":2975,"config":2980,"_id":2982,"_type":14,"title":2983,"_source":16,"_file":2984,"_stem":2985,"_extension":19},"/en-us/blog/gitlab-product-vision",{"title":2970,"description":2971,"ogTitle":2970,"ogDescription":2971,"noIndex":6,"ogImage":2972,"ogUrl":2973,"ogSiteName":667,"ogType":668,"canonicalUrls":2973,"schema":2974},"GitLab's product vision for 2019 and beyond","Watch Head of Product, Mark Pundsack, present our product vision.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671613/Blog/Hero%20Images/gitlab-innovate-cover.png","https://about.gitlab.com/blog/gitlab-product-vision","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's product vision for 2019 and beyond\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-10-01\",\n      }",{"title":2970,"description":2971,"authors":2976,"heroImage":2972,"date":2977,"body":2978,"category":299,"tags":2979},[950],"2018-10-01","\n\nWe [recently went live](/blog/gitlab-live-event-recap/) to discuss the\nnews of our [Series D funding](/blog/announcing-100m-series-d-funding/)\nand what the future holds for GitLab. 
You can watch GitLab's Head of Product,\nMark Pundsack, present our vision with some previews of what's in the works\nbelow:\n\n## Watch the recording\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZgFqyXCsqPY?start=3796\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### View the slides\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vRCKP-VcLD9IomS8d1U8N73dfFWLtsVCAPtGiKBwlIv68U6tlZViv6HGCk53Nd_8HxitqDN-lVvIaTE/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>>\n\u003C/figure>\n\n## Summary of our product vision\n\nOur strategy is to double down on what's working: while we already cover the\nentire DevOps lifecycle, we want to increase depth in some of our existing\nfeatures, transitioning from minimum viable change to minimum loveable feature.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">I thought \u003Ca href=\"https://twitter.com/Jobvo?ref_src=twsrc%5Etfw\">@Jobvo\u003C/a> had me at &quot;Minimal Viable Change&quot;.  
But then \u003Ca href=\"https://twitter.com/MarkPundsack?ref_src=twsrc%5Etfw\">@MarkPundsack\u003C/a> comes out with &quot;Minimal Lovable Product&quot; and I&#39;m awestruck \u003Ca href=\"https://twitter.com/hashtag/GitLabLive?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabLive\u003C/a>\u003C/p>&mdash; Brendan O&#39;Leary 👨🏻‍💻 (@olearycrew) \u003Ca href=\"https://twitter.com/olearycrew/status/1042837763480068096?ref_src=twsrc%5Etfw\">September 20, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWe're also going to continue to increase our breadth, building out new\ncapabilities across the entire DevOps lifecycle.\n\nAnd finally, because we believe everyone can contribute, we're going to add more\nroles to the scope of product, including executives, designers, product\nmanagers, and essentially anyone who is involved in software development and\ndelivery. Our goal is to get everyone working concurrently in a single product,\nwith nine best-in-class categories.\n\n## Coming up...\n\nWe're working on building out 26 new capabilities, but because you don't have\nall day, below are three examples to give you a taste of what's in the works.\n\n\u003Csmall>*Obligatory disclaimer: These are mock-ups and the features may turn out\nlooking a little different, or may not ship at all*\u003C/small>\n\n### Executive flow: Value Stream Management\n\nAt its heart, [Value Stream Management](/solutions/value-stream-management/)\nis about understanding your teams' work and their workflow on the way to\ndelivering value to customers. The way we're approaching it is to extend\nsomething development teams are already using to track their work, namely issue\nboards, and bring it into the bigger picture by having a board that covers the\nentire workflow necessary to get ideas into production.\n\nBecause GitLab already covers that entire scope, we can automate it too. 
We know\nwhen a feature is scheduled. We know when you push your first commit. We know\nwhen code review starts. We know when you deploy your code to production. So we\ncan move the cards to the right spots automatically, so not only can you track\nyour progress and communicate it to your team, you can track it all\nautomatically and more accurately. Neat, huh?\n\n![Value Stream Management analytics view](https://about.gitlab.com/images/blogimages/product-vision-sep-20/vsm-analytics.png){: .shadow.medium.center}\n\nThe above mock-up demonstrates a situation where someone was able to dive into\nthe time spent on various areas, and see that the time spent waiting for someone\nto even start QA was really high, and they managed to shave off a few days just\nby rearranging some internal processes. The same goes for the code review cycle.\n\n### Ops flow: Incident management\n\nThis is an operations flow based on a new product capability:\n[incident management](https://gitlab.com/groups/gitlab-org/-/epics/349). We\nmonitor your production apps and detect an anomaly, alert you, and then open an\nincident. Then in one place you can see: what triggered the alert, who's\ninvolved in responding, quick links to the Slack conversation, Zoom call, and\nwhere to update your public status page. There's also a timeline of all\nactivity. Because this is part of the same application that developers are\nusing, it’s not just operations people using this tool, so when you’re working\ntogether on problems, you’re looking at the same data, and GitLab knows not only\nwhat metrics are alerting, but what code was recently deployed that might have\ncaused it, and who was behind that code. When the incident is resolved, you can\neasily follow up with your users with a postmortem, pulling in all the relevant\ndata and timeline of events. 
Of course, with all that data comes great power for\nanalytics, to help the team learn from the incidents and improve.\n\n![Incident open](https://about.gitlab.com/images/blogimages/product-vision-sep-20/incident-management-error-rate.png){: .shadow.medium.center}\n  *\u003Csmall>Mock-up showing an Incident open with timeline view, including Slack messages and Status page updates\u003C/small>*\n\n### Security flow: Auto remediate\n\nA common security task is watching for new vulnerabilities in your project’s\ndependencies. If a module you depend on has a vulnerability, there’s usually a\npatch update to go along with it. When that patch is released, you then need to\ntest your software again with that patch, to make sure everything still works\nbefore you deploy it. That’s a pain!\n\nInstead of making anyone do all that repetitive, but necessary security work,\n[we want to automate it all away](https://gitlab.com/groups/gitlab-org/-/epics/133).\nIn our vision, a bot detects that a dependency has a new version, and instead of\nalerting someone, automatically creates a merge request that bumps the version\nnumber for you, and runs the test suite to make sure that everything still\nworks. The CI pipeline passes, and confirms that the security vulnerability is\nnow gone, so the bot automatically merges the changes. If all goes well, your\nsecurity and development teams just get an email in the morning saying that all\nthe projects with that dependency were automatically fixed.\n\nBy why leave a known, security vulnerability live any longer than it needs to?\nTo bring it full circle, after merging, the CI/CD pipeline starts incrementally\ndeploying to production. If the production error rate jumps, we automatically\nstop the incremental rollout, and go ahead and roll back to the last-known good\nversion immediately. The bot detects this and automatically reverts the merge\nrequest so we can leave `master` in a good state. 
This, we can finally alert the\nteams about, so instead of having to test 20 projects manually, they can focus\non the few that can’t be automated.\n\n![Auto remediate reverted](https://about.gitlab.com/images/blogimages/product-vision-sep-20/auto-remediate-reverted.png){: .shadow.medium.center}\n  *\u003Csmall>Mock-up showing a merge request reverted automatically following detection of production errors\u003C/small>*\n\nAs always, our plans are in draft and we welcome your feedback and input!\n",[677,9],{"slug":2981,"featured":6,"template":680},"gitlab-product-vision","content:en-us:blog:gitlab-product-vision.yml","Gitlab Product Vision","en-us/blog/gitlab-product-vision.yml","en-us/blog/gitlab-product-vision",{"_path":2987,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2988,"content":2994,"config":2999,"_id":3001,"_type":14,"title":3002,"_source":16,"_file":3003,"_stem":3004,"_extension":19},"/en-us/blog/gitlab-raises-20-million-to-complete-devops",{"title":2989,"description":2990,"ogTitle":2989,"ogDescription":2990,"noIndex":6,"ogImage":2991,"ogUrl":2992,"ogSiteName":667,"ogType":668,"canonicalUrls":2992,"schema":2993},"Announcing $20 million in Series C round funding led by GV to complete DevOps","We went live today with The Changelog’s Adam Stacoviak and Jerod Santo to announce $20M in new funding, a new board member, and our vision for Complete DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671288/Blog/Hero%20Images/gitlab-live-event.png","https://about.gitlab.com/blog/gitlab-raises-20-million-to-complete-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing $20 million in Series C round funding led by GV to complete DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2017-10-09\",\n      
}",{"title":2989,"description":2990,"authors":2995,"heroImage":2991,"date":2996,"body":2997,"category":299,"tags":2998},[950],"2017-10-09","Update: for the most recent status of complete [DevOps](/topics/devops/) please see our [Product Vision](/direction/) page.\n\nToday we are thrilled to announce our $20 million Series C funding led by GV. This follows [our Series B round last September](/blog/gitlab-master-plan/). With the help of our investors (and community!) we’re gearing up to bring you Complete DevOps, a reimagined scope of DevOps that unifies development and operations work into a single user experience.\n\n\u003C!-- more -->\n\nNot a GitLab user? [Install GitLab](/install/) or [sign in](https://gitlab.com/users/sign_in) to get started!\n{: .alert .alert-gitlab-orange}\n\nIn addition to our Series C funding round led by Dave Munichiello, GV General Partner, we’re excited to announce new board member,\nMatt Mullenweg, founder of WordPress.\n\n> \"The Fortune 500 is racing to build world-class software development organizations that mirror the speed,\nproductivity, and quality of the largest tech companies. As these organizations strive to produce high-quality\ncode at scale, they will need best-in-class tools and platforms. GitLab’s platform accelerates\nthe development process with an emphasis on collaboration and automation.\nGitLab’s hybrid, multi-cloud solution is loved by developers, and is seeing tremendous traction in the field.\" – Dave Munichiello, GV General Partner\n\n> \"GitLab’s powerful momentum and scaling have a lot of parallels to Automattic and WordPress in their early days.\nWordPress had to battle a lot of competitors, and ultimately came out on top as a successful company on an open source business model.\nI hope to help GitLab achieve the same triumph. 
Fundamentally, I want to help create the kind of internet that I want to live in\nand I want my children to live in, one that reaches a global audience and one that is able to make a difference.\"\n– Matt Mullenweg, founder of WordPress\n\nSince our start in 2014, we’ve had one mission: change all creative work from read-only to read-write so that everyone can contribute.\nLast year we unveiled GitLab’s Master Plan on September 13th, committing to shipping every stage of idea to production (which we completed in 8.15!).\nThis was a major step forward in simplifying the software development process.\nNow, we're taking it a step further to unite development and operations in one user experience.\nWatch the recording of our earlier live stream announcing our #CompleteDevOps vision below,\nand keep scrolling for a recap and the slides from the presentation.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/5dhjw-TT964\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\n## Complete DevOps\n\nBefore DevOps, the world of software iteration was slow, insecure, and error prone.\nDevOps came to the intersection of development and operations to create faster iteration cycles with greater quality and security.\n\nBut it didn't go far enough...\n\nIn the current landscape, developers and operations use different tools,\nthey don't have the ability to fully collaborate, and the need to integrate\nmany disparate tools continues to be a point of friction that slows progress\nand leads to insecure, poor quality code.\n\nComplete DevOps reimagines the scope of tooling to include both developers\nand operations teams in one unified solution. 
This dramatically reduces friction,\nincreases collaboration, and drives a competitive advantage.\n\nIn [10.0](/releases/2017/09/22/gitlab-10-0-released/), we shipped the first iteration of Auto DevOps,\nwhich just scratches the surface of the Complete DevOps features we have in the works.\nYou can read our Head of Product [Mark Pundsack](/company/team/#MarkPundsack)’s\ndetailed vision in [his blog post](/blog/devops-strategy/), but to summarize:\n\nWe want to build GitLab into the complete DevOps tool chain.\nWe already cover every stage of the software development lifecycle. Why stop at production?\nWhy not go beyond that, into operations? We want to close the loop between Dev and Ops,\nautomating processes and reducing complexity so that you can focus on a great customer experience.\n\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vRVKUjMMa7M7lPV04_TMgfmd2Fj_kEQYW9-RvKAtKf799_Dwbfvos8diqinI-Uhm1uTwPYCdAPPzun1/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\n### Why Complete DevOps?\n\n1. A single UI for development and operations means less time is wasted switching tools.\n2. All phases of DevOps are deeply integrated, so development and operations can work together collaboratively with less friction.\n3. The best practices of more than 100K organizations are built in by default.\n4. 
You benefit from a single install, with upgrades that don't break, no integration work, and one permission model.\n\nShare your thoughts, comments, and questions about #CompleteDevOps with us [on Twitter](https://twitter.com/gitlab)!\n\n### The cloud-native development solution\n\nThe software world is moving from virtual machines to cloud-native development.\nWe want to help ease this transition for companies, by offering a complete development and operations solution for cloud-native development.\n\n## Get involved\n\nWe 💜 our community! At GitLab, everyone can contribute and we owe GitLab’s existence to your enthusiasm,\ndrive, and hard work. Without our contributors’ belief in open source software, we would not be where we are today.\nWe need your help to make our collective vision a reality.\n\nWe are committed to standing by our [promise to be good stewards of open source](/blog/being-a-good-open-source-steward/),\nand keeping communication and collaboration amongst the community a high priority.\nOur open core business model ships both open and closed software.\nIn an effort to maintain an unprecedented level of transparency, we follow three key principles:\n\n1. [Development in the open](/blog/improving-open-development-for-everyone/). You can submit issues in a public issue tracker. This is not a read-only interface.\n1. [Business in the open](/blog/almost-everything-we-do-is-now-open/). Our company handbook and policies are in the open.\n1. [Clear direction](/direction/). 
Our Direction page clarifies the current project priorities and what is possible in the upcoming releases.\n\nRead more about our company values in our [open source](/blog/our-handbook-is-open-source-heres-why/) [handbook](https://handbook.gitlab.com/handbook/values/), licensed by [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/).",[675,9,873],{"slug":3000,"featured":6,"template":680},"gitlab-raises-20-million-to-complete-devops","content:en-us:blog:gitlab-raises-20-million-to-complete-devops.yml","Gitlab Raises 20 Million To Complete Devops","en-us/blog/gitlab-raises-20-million-to-complete-devops.yml","en-us/blog/gitlab-raises-20-million-to-complete-devops",{"_path":3006,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3007,"content":3013,"config":3019,"_id":3021,"_type":14,"title":3022,"_source":16,"_file":3023,"_stem":3024,"_extension":19},"/en-us/blog/gitlab-summit-cape-town-recap",{"title":3008,"description":3009,"ogTitle":3008,"ogDescription":3009,"noIndex":6,"ogImage":3010,"ogUrl":3011,"ogSiteName":667,"ogType":668,"canonicalUrls":3011,"schema":3012},"Salani kakuhle (bye!) and thanks for a great summit in Cape Town!","And just like that, it was all over. Check out the highlights and keynote from our recent summit in South Africa.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670482/Blog/Hero%20Images/summit_recap_pic_post.jpg","https://about.gitlab.com/blog/gitlab-summit-cape-town-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Salani kakuhle (bye!) 
and thanks for a great summit in Cape Town!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daisy Miclat\"},{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-09-14\",\n      }",{"title":3008,"description":3009,"authors":3014,"heroImage":3010,"date":3016,"body":3017,"category":299,"tags":3018},[3015,2353],"Daisy Miclat","2018-09-14","\n\nFrom August 23-29, 350 GitLab team-members, significant others, community members, and customers descended on Cape Town, South Africa to get to know one another IRL at our sixth [summit](/events/gitlab-contribute/). As an all-remote company, it’s not often we’re all in one place, so we get together every nine months to hang out, bond, take in the local sights, and even get a little work done.\n\n## Highlights\n\n### Keynote\n\nAfter getting settled in and, for many, powering through some brutal jetlag, we gathered for the opening keynote with Chief Culture Officer [Barbie Brewer](/company/team/#BarbieJBrewer), Chief Revenue Officer [Michael McBride](/company/team/#mmcb), Head of Product [Mark Pundsack](/company/team/#MarkPundsack), and CEO and co-founder, [Sid Sijbrandij](/company/team/#sytses), which you can watch below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4BIsON95fl8\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Challenge\n\nIt’s become [tradition at our summits for Sid to throw down the gauntlet with a few challenges](/blog/gitlab-summit-greece-recap/#summit-challenges), and this year’s was no different:\n\n![Cape Town summit challenges](https://about.gitlab.com/images/blogimages/summit2018/summit-challenge-slide.png){: .shadow.medium.center}\n\nAnd, as with previous summits, we were promised to be rewarded richly for meeting the challenges:\n\n![Cape Town summit challenges 
reward](https://about.gitlab.com/images/blogimages/summit2018/summit-challenge-win.png){: .shadow.medium.center}\n\nIt's also become tradition that we hit it out of the park 😎 We're happy to report that we were successful in challenges! Greg Brewer was convinced and is #movingtogitlab, and we've [added the ability to request a free instance check](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/6995).\n\n### Excursions\n\nThe summits also give us an amazing opportunity to get to know the area that we’re visiting. We were able to choose from some phenomenal excursions throughout Cape Town to learn more about the culture and history of what locals affectionately call the Mother City.\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-5\" class=\"carousel slide medium center\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"1\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"2\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n          \u003Cimg src=\"/images/blogimages/summit2018/cape-of-good-hope.jpeg\" alt=\"Cape of Good Hope\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/summit2018/lanzerac-wine-tour.jpg\" alt=\"Lanzerac wine tour\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/summit2018/robben-island.jpg\" alt=\"Robben Island\">\n    \u003C/div>\n\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca class=\"left carousel-control\" href=\"#carousel-example-generic-5\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon 
glyphicon-chevron-left\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-5\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Next\u003C/span>\n  \u003C/a>\n\u003C/div>\n\n\n#### Boulders Beach and the Cape of Good Hope\n\nA beautiful tour along the coast and the opportunity to say hello to our furry friends, our first stop on this excursion was Boulders Beach, where we saw cute African Penguins waddling around, taking swims, and hanging out. They weren’t fazed by us humans. If anything they enjoyed the attention! Up next, we drove to the southernmost tip of Africa, through breathtaking, untouched terrain. Along the way, we spotted local wildlife including antelopes, ostriches, and a couple of feisty baboons.\n\n#### Robben Island\n\nA somewhat choppy 20-minute ferry ride from Victoria Wharf, [Robben Island](http://www.robben-island.org.za/) is home to the prison where political activist and South Africa's first democratic president Nelson Mandela was imprisoned for 18 years. Our tour guide was a former prisoner himself, and he shared his experiences and the history of Robben Island. 
Although it was a somber setting, we were able to learn more about the history of South Africa and how inequality existed not too long ago.\n\n#### Cape winelands\n\nThe Western Cape is home to some spectacular wine estates. Some GitLab team-members visited [Groot Constantia](https://www.grootconstantia.co.za/), the oldest wine-producing estate in the country, while others ventured further to Paarl, Franschhoek and Stellenbosch for a leisurely day of vineyard hopping and tasting. Those of us checking baggage loaded up on the good stuff to take home.\n\n#### City and cultural tour\n\nA tour of the city center included visits to the [District Six Museum](http://www.districtsix.co.za/), [Castle of Good Hope](https://castleofgoodhope.co.za/), and the [Slave Lodge](https://www.iziko.org.za/museums/slave-lodge), stopping off at the V&A Waterfront for lunch. Some persuasive GitLab team-members got the tour guide to agree to a diversion to quirky coffee shop and Capetonian institution, [Truth Café](https://truth.coffee/pages/truth-cafe), to soak up some of the city's coffee culture.\n\n#### Tour of Langa\n\nSome GitLab team-members also visited Langa, the oldest township in Cape Town. After being greeted by the locals at the cultural center, they shared their dance, music, and history. Some of us were even able to participate and beat on the drums or do a little dancing! Our tour guide shared the history of the township: its beginnings during Apartheid, how things are today, and where they are striving to rebuild unity within the community. Our tour ended with a lovely dance performance and goodbyes from the locals.\n\n### UGC sessions\n\nOur summit UGC (user-generated content) sessions are an opportunity for anyone attending to raise a subject for discussion or run a workshop. 
With topics as diverse as \"Kubernetes 101,\" \"Learn to Yo-Yo for fun and profit,\" \"How to be a great public speaker,\" \"Yoga/body balance,\" and \"Cocktail making class,\" there's always something for everyone, and it's up to individuals to decide how formal or off-the-cuff they want their session to be.\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-4\" class=\"carousel slide medium center\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-4\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-4\" data-slide-to=\"1\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n      \u003Cimg src=\"/images/blogimages/summit2018/yoga-ugc.jpg\" alt=\"Yoga and body balance session\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/summit2018/for-funs-sake-ugc.jpg\" alt=\"Pinpoint pain points in GitLab session\">\n    \u003C/div>\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca class=\"left carousel-control\" href=\"#carousel-example-generic-4\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-left\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-4\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" 
xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Next\u003C/span>\n  \u003C/a>\n\u003C/div>\n\nAs we grow, the summit grows with us. Now, our formidable resident summit expert [Kirsten](/company/team/#kirstenabma) is focusing on planning our summits FULL TIME. As we closed out our Cape Town gathering, she announced to wild cheers that our next one will be going down in May 2019, in New Orleans, LA, USA! Bring on the beignets!\n\nSee you next time 🇿🇦\n",[677,277,832,9],{"slug":3020,"featured":6,"template":680},"gitlab-summit-cape-town-recap","content:en-us:blog:gitlab-summit-cape-town-recap.yml","Gitlab Summit Cape Town Recap","en-us/blog/gitlab-summit-cape-town-recap.yml","en-us/blog/gitlab-summit-cape-town-recap",{"_path":3026,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3027,"content":3033,"config":3039,"_id":3041,"_type":14,"title":3042,"_source":16,"_file":3043,"_stem":3044,"_extension":19},"/en-us/blog/gitlab-summit-greece-recap",{"title":3028,"description":3029,"ogTitle":3028,"ogDescription":3029,"noIndex":6,"ogImage":3030,"ogUrl":3031,"ogSiteName":667,"ogType":668,"canonicalUrls":3031,"schema":3032},"αντίο (Goodbye) and thanks for a great GitLab summit – Crete edition","That's a wrap! 
Check out the keynote from our summit in Greece below.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671644/Blog/Hero%20Images/gitlab-summit-crete.jpg","https://about.gitlab.com/blog/gitlab-summit-greece-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"αντίο (Goodbye) and thanks for a great GitLab summit – Crete edition\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2017-10-25\",\n      }",{"title":3028,"description":3029,"authors":3034,"heroImage":3030,"date":3036,"body":3037,"category":299,"tags":3038},[3035],"Erica Lindberg","2017-10-25","\n\nFor the past week, around 250 GitLab team-members and significant others gathered in Crete, Greece\nto achieve one simple goal: **get to know each other!** As a remote-only company, we\ndon't often meet face to face, so our [summits](/events/gitlab-contribute/) are an extraordinary occasion. This year, in the spirit of \"everyone can contribute,\" we tried something new.\nWe decided to live stream from 9am to 9pm in an effort to bring the summit experience\ndirectly to you, wherever you are.\n\n\u003C!-- more -->\n\n## Highlights\n\nOver the course of the week, we accomplished a lot! Team members from over 30\ndifferent countries had the chance to work creatively with people outside of their\ncore team during the Amazing Race, mingle on the beautiful island of Santorini,\nand explore the ancient ruins of the Knossos Palace.\n\n### Summit challenges\n\n[In keeping with tradition from past summits](https://www.youtube.com/watch?time_continue=1&v=39chczWRKws),\nSid also had a couple of work-related challenges for the team. 
If we completed the challenges\nby the end of the week, he would perform a GitLab song.\n\n![summit challenges slide](https://about.gitlab.com/images/blogimages/summit-challenges-slide.jpg)\n\nWe managed to complete all of our challenges and at the closing Toga Party, Sid and Karen delighted us with a GitLab song to the tune\nof *[I'm Gonna Be (500 Miles)](https://www.youtube.com/watch?v=tbNlMtqrYS0)* \u003Ci class=\"fas fa-microphone\" aria-hidden=\"true\">\u003C/i>\n\nAnd the best part is that we were able to share this in real time with contributors from around the world. It was our vision to make the summit `read-write`, so that even if you weren't with us in Greece, you could\nstill participate and contribute. Thanks to everyone who joined in, sent in questions and comments, and for a while made the planet feel a little smaller.\n\n### Keynote with CEO Sid Sijbrandij\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/AopRnEbvgzE?start=3925\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\n#### Keynote slides\n\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vQA5srWjTIMmNHR3vWITDXlHj3iBSwxaTVLc_haoDZoBiH6XnGn_JdbR11A1YVOBd_mdcMZnxG_5yDS/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\nThanks everyone for participating! 
See you next time 😎\n",[832,9],{"slug":3040,"featured":6,"template":680},"gitlab-summit-greece-recap","content:en-us:blog:gitlab-summit-greece-recap.yml","Gitlab Summit Greece Recap","en-us/blog/gitlab-summit-greece-recap.yml","en-us/blog/gitlab-summit-greece-recap",{"_path":3046,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3047,"content":3052,"config":3058,"_id":3060,"_type":14,"title":3061,"_source":16,"_file":3062,"_stem":3063,"_extension":19},"/en-us/blog/gitlab-technical-certification-award-wins",{"title":3048,"description":3049,"ogTitle":3048,"ogDescription":3049,"noIndex":6,"ogImage":2010,"ogUrl":3050,"ogSiteName":667,"ogType":668,"canonicalUrls":3050,"schema":3051},"GitLab Technical Certifications program wins 5 awards at LearnX Conference","GitLab's Tech Certification programs won 5 different awards at this year's LearnX conference.","https://about.gitlab.com/blog/gitlab-technical-certification-award-wins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Technical Certifications program wins 5 awards at LearnX Conference\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kendra Marquart\"}],\n        \"datePublished\": \"2021-12-03\",\n      }",{"title":3048,"description":3049,"authors":3053,"heroImage":2010,"date":3055,"body":3056,"category":720,"tags":3057},[3054],"Kendra Marquart","2021-12-03","\n\nIn June of this year our Professional Services team entered our [GitLab Technical Certification programs](/handbook/customer-success/professional-services-engineering/gitlab-technical-certifications/) into several different worldwide conferences and we are proud to announce that GitLab has won 5 awards at this year's LearnX learning impact awards! 
\n\nWe won 3 Gold awards for our [GitLab Certified CI/CD Specialist Self Paced Course](/services/education/gitlab-technical-certification-self-paced/) in the following categories: \n\n![LearnX gold award](https://about.gitlab.com/images/blogimages/learnxgold.png){: .shadow.small.left}\n\n- Best Certification Training Project \n- Best Game eLearning Design \n- Best Learning and Development Project \n\nWe won 2 Silver awards for our [GitLab Certified Associate Self Paced Course](/services/education/gitlab-technical-certification-self-paced/) in the following categories: \n\n![LearnX silver award](https://about.gitlab.com/images/blogimages/learnxsilver.png){: .shadow.small.left}\n\n- Best Micro/Bite Size eLearning Design \n- Best Content Curation Project\n\n## What is LearnX?\n\nThe LearnX Impact Awards is an annual event run by the LearnX Foundation, a not-for-profit organization promoting innovative workforce learning and supporting technologies. This conference is held once a year in November and highlights success in the learning and development space. \n\n## What GitLab Technical Certifications are Available?\n\nWe currenly offer the following [GitLab Technical Certifications](/handbook/customer-success/professional-services-engineering/gitlab-technical-certifications/), all of which are available as self-paced e-learnings in [GitLab Learn](/learn/) or as an [Instructor-Led class](/services/education/) with our Professional Services team.  
\n\n\n",[677,9,267],{"slug":3059,"featured":6,"template":680},"gitlab-technical-certification-award-wins","content:en-us:blog:gitlab-technical-certification-award-wins.yml","Gitlab Technical Certification Award Wins","en-us/blog/gitlab-technical-certification-award-wins.yml","en-us/blog/gitlab-technical-certification-award-wins",{"_path":3065,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3066,"content":3072,"config":3078,"_id":3080,"_type":14,"title":3081,"_source":16,"_file":3082,"_stem":3083,"_extension":19},"/en-us/blog/gitlab-tiers",{"title":3067,"description":3068,"ogTitle":3067,"ogDescription":3068,"noIndex":6,"ogImage":3069,"ogUrl":3070,"ogSiteName":667,"ogType":668,"canonicalUrls":3070,"schema":3071},"New names for GitLab self-managed pricing tiers","Understand GitLab's pricing tiers and know which features your subscription gives you access to.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680136/Blog/Hero%20Images/gitlab-tiers-cover.png","https://about.gitlab.com/blog/gitlab-tiers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New names for GitLab self-managed pricing tiers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-04-20\",\n      }",{"title":3067,"description":3068,"authors":3073,"heroImage":3069,"date":3075,"body":3076,"category":299,"tags":3077},[3074],"William Chia","2018-04-20","\n\n_Note: We've continued to iterate on our platform and pricing model since this blog post was published in 2018. To see what's new (including everything from security and container-focused capabilities to guest users), check out our [platform](https://about.gitlab.com/platform/), [pricing](https://about.gitlab.com/pricing/), and [why GitLab](https://about.gitlab.com/why-gitlab/) pages._\n\nAt GitLab, [iteration is one of our ore values](https://handbook.gitlab.com/handbook/values/#iteration). 
We’ve recently iterated on the names of our self-managed pricing tiers, so [Marcia](/company/team/#XMDRamos) and I got together and wrote this post\nto catch you up on the current options. We’ll explain each tier, and share how to figure out\nwhich features your subscription gives you access to.\n\n- [GitLab deployment options](#gitlab-deployment-options)\n- [GitLab self-hosted](#gitlab-self-managed)\n- [GitLab.com](#gitlabcom)\n- [Repository architecture](#repository-architecture)\n- [Subscription model](#subscription-model)\n- [Examples of use cases](#examples)\n\n## GitLab deployment options\n\nTo use GitLab, you have two options:\n\n- **GitLab self-managed**: Install, administer, and maintain your own GitLab self-managed instance.\n- **GitLab.com**: GitLab's SaaS offering. You don't need to install anything to use GitLab.com,\nyou only need to [sign up](https://gitlab.com/users/sign_in) and start using GitLab\nstraight away.\n\n### GitLab self-managed\n\nWith GitLab self-managed, you deploy your own GitLab instance on-premises or in the cloud. From\nbare metal to Kubernetes, you can [install GitLab almost\nanywhere](/install/). GitLab self-managed has both [free\nand paid options](/pricing/):\n**Core**, **Starter**, **Premium**, and **Ultimate**.\n\nYou can see a full list of features in each self-managed tier on the [self-managed feature\ncomparison](/pricing/feature-comparison/) page. 
For more details on storage amounts and CI/CD minutes per month, see our [pricing page](https://about.gitlab.com/pricing/).\n\n### GitLab.com\n\nGitLab.com is hosted, managed, and administered by GitLab, Inc., with\n[free and paid options](/pricing/) for individuals\nand teams: **Free**, **Bronze**, **Silver**, and **Gold**.\n\nTo support the open source community and encourage the development of\nopen source projects, GitLab grants access to **Gold** features\nfor all GitLab.com **public** projects, regardless of the subscription.\n\nYou can see a full list of features in each GitLab.com tier on the [GitLab.com feature\ncomparison](/pricing/feature-comparison/) page.\n\n### Repository architecture\n\nWe develop GitLab from two repositories, one for GitLab Community Edition (CE)\nand another for GitLab Enterprise Edition (EE):\n\n- [GitLab CE](https://gitlab.com/gitlab-org/gitlab-ce/): open source code, [MIT-based\nlicense](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/LICENSE), from which we deliver\nGitLab CE packages.\n- [GitLab EE](https://gitlab.com/gitlab-org/gitlab-ee/): open core code, [proprietary\nlicense](https://gitlab.com/gitlab-org/gitlab-ee/blob/master/LICENSE), from which we deliver\nGitLab EE packages.\n\nGitLab EE grants you access to features by installing a license key. You\ncan also install GitLab EE and run it for free without a license key which will give you\naccess to the same features as CE. This makes it easier to upgrade later on.\n\nVisit the CE vs EE page to see [which GitLab installation method to\nchoose](/install/ce-or-ee/).\n\n### Subscription model\n\nGitLab Core contains all of the open source features of GitLab. Whether you are running GitLab\nCE or GitLab EE without a license key, you'll get access to the same Core features. 
The\nproprietary features of EE are unlocked by purchasing a license key.\n\nTiers are additive:\n- Starter contains all the features of Core\n- Premium contains all the features of Starter and Core\n- Ultimate contains all of the features of Premium, Starter, and Core\n\n![GitLab Core, Starter, Premium, Ultimate](https://about.gitlab.com/images/blogimages/gitlab-tiers-repos-and-tiers.jpg)\n\n### Examples\n\n- Consider a user of [GitLab Premium](/pricing/premium/) who wants to contribute to a given feature present in GitLab Core, e.g. Issue Boards. The code is submitted to the CE repo, therefore, it's open source code. The master branch of GitLab CE is then merged into GitLab EE. The CE code will be available to this Premium user in the next release.\n- Consider a user of GitLab Premium who wants to contribute to a given feature present only in Premium, e.g., Geo. The code is submitted directly to the EE repo, therefore, it's proprietary. The same is valid for Starter and Ultimate features.\n\n### Use cases\n\n#### GitLab self-managed use cases\n\n- I installed GitLab CE: I’m a Core user. I have access to Core features. The software I’m using is 100 percent open source.\n- I installed GitLab EE: the software I’m using is open core- it includes both open source and proprietary code.\n  - I don't have a subscription: I have access to Core features.\n  - I have a Starter subscription: I have access to Starter features.\n  - I have a GitLab Premium subscription: I have access to Premium features.\n  - I have a GitLab Ultimate subscription: I have access to Ultimate features.\n- I have a trial installation: I installed GitLab EE, and I’m an Ultimate user during the valid period of the trial. If the trial period expires and I don’t get a paid subscription (Starter, Premium, or Ultimate), I’ll become a Core user, with access to Core features.\n\n#### GitLab.com use cases\n\n- I use GitLab.com, a huge installation of GitLab EE. 
I’m using proprietary software.\n- I don’t have access to administration features as GitLab.com is administered by GitLab, Inc.\n- _Subscriptions_:\n  - I have a Bronze subscription: my private projects get access to Bronze features. My public projects get access to Gold features.\n  - I have a Silver subscription: my private projects get access to Silver features. My public projects get access to Gold features.\n  - I have a Gold subscription: my private projects get access to Gold features, as well as my public projects.\n  - I don’t have any paid subscriptions: I’m a Free GitLab.com user:\n      - I have access to Free features for private projects.\n      - I have access to Gold features for public projects.\n\n_Questions, comments? Let us know what you think below._\n",[9,677],{"slug":3079,"featured":6,"template":680},"gitlab-tiers","content:en-us:blog:gitlab-tiers.yml","Gitlab Tiers","en-us/blog/gitlab-tiers.yml","en-us/blog/gitlab-tiers",{"_path":3085,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3086,"content":3092,"config":3098,"_id":3100,"_type":14,"title":3101,"_source":16,"_file":3102,"_stem":3103,"_extension":19},"/en-us/blog/gitlab-top-devops-tooling-metrics-and-targets",{"title":3087,"description":3088,"ogTitle":3087,"ogDescription":3088,"noIndex":6,"ogImage":3089,"ogUrl":3090,"ogSiteName":667,"ogType":668,"canonicalUrls":3090,"schema":3091},"The top DevOps tooling metrics and targets at GitLab","Here is how we measure DevOps success and why we always try to look forward.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665635/Blog/Hero%20Images/blog-performance-metrics.jpg","https://about.gitlab.com/blog/gitlab-top-devops-tooling-metrics-and-targets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The top DevOps tooling metrics and targets at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mek Stittri\"}],\n        
\"datePublished\": \"2022-04-05\",\n      }",{"title":3087,"description":3088,"authors":3093,"heroImage":3089,"date":3095,"body":3096,"category":743,"tags":3097},[3094],"Mek Stittri","2022-04-05","\n\nA successful DevOps practice relies heavily on metrics. Here at GitLab, we use seven key DevOps metrics to measure engineering efficiency and productivity.  Like many teams, we use industry standard metrics, but in some cases, we approach this data with a unique GitLab point of view. Here’s the first in a multipart look at the DevOps metrics we at GitLab think are most critical for success. Compare your metrics and results with ours, and [let’s get a conversation started](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13202).\n\n## Master pipeline stability\n\nIt’s important to be able to measure the stability of the GitLab project’s master branch pipeline. This metric tells us how stable the main branch is, and ensures engineers are checking out code that’s in good shape. [Merge trains](https://gitlab.com/gitlab-org/quality/team-tasks/-/issues/195) are key to this effort. \n\nOur target percentage for [master pipeline stability](/handbook/engineering/quality/performance-indicators/#master-pipeline-stability  ) is above 95%.\n\n![master pipeline stability](https://about.gitlab.com/images/blogimages/dometrics1.png)\n\n## Review app deployment success rate\n\nAt GitLab we take [review apps](https://docs.gitlab.com/ee/ci/review_apps/) seriously.  We measure their success rate so we can understand the stability of our first deployed environment after code change. Review apps are spun up at MR submission. It’s important to monitor our review app successful deployments because it’s the first place where code is integrated and deployed as one unit. This metric ensures the codebase can be installed, tested, and made available for the team to preview their changes before merging into the main master branch. 
\n\nOur target for [review application deployment success](/handbook/engineering/quality/performance-indicators/#review-app-deployment-success-rate) is above 99%. \n\n![review app deployment success](https://about.gitlab.com/images/blogimages/dometrics2.png)\n\n## Time to First Failure\n\nTime to First Failure (TtFF, pronounced as “teuf”) measures how fast we are providing feedback to engineers. This metric examines how long it takes from pipeline creation to the first actionable failed build. The idea is that if the commit is going to fail, it should fail fast and the fail signal should get to the engineers as quickly as possible. The shorter the time to first failure, the faster the feedback loop, and faster time to action to address those failures. \n\nOur [TtFF target](/handbook/engineering/quality/performance-indicators/#time-to-first-failure) is less than 15 minutes.\n\n![TtFF or Time to First Failure](https://about.gitlab.com/images/blogimages/dometrics3.png)\n\n## Open S1 bug age\n\nThis metric focuses on the age of open S1 bugs. Many organizations measure time to close bugs. At Gitlab we focus on the age of bugs remaining. We structure the metric to focus on work that is remaining and can be actioned on. If we only measure time to close of fixed defects, we may miss addressing older defects and unintentionally incentivize closing of only newer defects. We like to look forward by asking ourselves “What’s left?” and “What can be done now?” rather than only looking backward at what’s already been done.\n\nOur target for [S1 open bug age](/handbook/engineering/quality/performance-indicators/#s1-oba) is under 100 days.\n\n![Open S1 bug age](https://about.gitlab.com/images/blogimages/dometrics4.png)\n\n## Open S2 bug age\n\nThis metric is similar to the open S1 bug age, but is focused on S2 bugs. 
Again, we measure the age of remaining open bugs rather than focusing on bugs that have been closed.\n\nOur target for the [open S2 bug age](/handbook/engineering/quality/performance-indicators/#s2-oba) metric is below 300 days.\n\n![Open S2 bug age](https://about.gitlab.com/images/blogimages/dometrics5.png)\n\n## Merge request pipeline duration\n\nWhen a pipeline is started for a merge request, how long does it take to run? This metric focuses on the duration of merge request pipelines and its time efficiency.  Within the total duration we break the data down into multiple  stages The team then iterates and improves time efficiencies of each stage of the pipeline. This is a key building block for improving GitLab’s code cycle time and efficiency and ensures the code is merged in a timely manner.\n\nOur target for this metric is below 45 minutes.\n\n![MR pipeline duration](https://about.gitlab.com/images/blogimages/dometrics6.png)\n\n## MR pipeline costs\n\nWe use this metric at GitLab to help us determine our Merge Request Pipeline cost efficiency. We look at the total costs for the CI runners machines for MR pipelines. Once we’ve determined that figure, we divide it by the number of merge requests. This helps us monitor cost while fine-tuning efficiency. Speed and cost moves in different directions. To help speed up you can increase resources, but it comes at a cost. Monitoring this metric enables us to be balanced and have a healthy trade-off between optimizing for cost and speed.\n\nOur target for the [MR pipeline costs](/handbook/engineering/quality/performance-indicators/#merge-requests-pipeline-cost) metric is below 7.50.\n\n![MR pipeline costs](https://about.gitlab.com/images/blogimages/dometrics7.png)\n\n## What DevOps tooling metrics are most effective for your team?\n\nWe’d like to hear what you think of our choices, and our targets, and what works, or doesn’t, for you. 
[Chime in here](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13202).\n",[1440,9,722],{"slug":3099,"featured":6,"template":680},"gitlab-top-devops-tooling-metrics-and-targets","content:en-us:blog:gitlab-top-devops-tooling-metrics-and-targets.yml","Gitlab Top Devops Tooling Metrics And Targets","en-us/blog/gitlab-top-devops-tooling-metrics-and-targets.yml","en-us/blog/gitlab-top-devops-tooling-metrics-and-targets",{"_path":3105,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3106,"content":3111,"config":3118,"_id":3120,"_type":14,"title":3121,"_source":16,"_file":3122,"_stem":3123,"_extension":19},"/en-us/blog/gitlab-ux-2020-year-in-review",{"title":3107,"description":3108,"ogTitle":3107,"ogDescription":3108,"noIndex":6,"ogImage":690,"ogUrl":3109,"ogSiteName":667,"ogType":668,"canonicalUrls":3109,"schema":3110},"GitLab UX 2020 Year in Review","2020 was a difficult but productive year. Let's take a look back.","https://about.gitlab.com/blog/gitlab-ux-2020-year-in-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab UX 2020 Year in Review\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christie Lenneville\"}],\n        \"datePublished\": \"2020-11-20\",\n      }",{"title":3107,"description":3108,"authors":3112,"heroImage":690,"date":3114,"body":3115,"category":1342,"tags":3116},[3113],"Christie Lenneville","2020-11-20","\nA global pandemic and broad social unrest have made this year difficult for everyone. When times are as tough as 2020 has proven to be, it's easy to focus on the negative and forget about the many good things that happened along the way. But our product designers, user researchers, and technical writers spend every day doing great work, and we can't let that slip by unnoticed. 
\n\nIn this post, I want to be intentional about celebrating our successes during a year when many of us wanted to just curl up under a comfy blanket and wait for the turmoil to pass. So, let's take a moment to reflect on some of the things we can feel really proud to have achieved.\n\n## Usability is now a key consideration in our category maturity model\n\nHistorically, we rated the [maturity](https://about.gitlab.com/direction/maturity/) of our product areas fairly subjectively and based almost entirely on feature availability. This year, that changed when we introduced [Category Maturity Scorecards](https://about.gitlab.com/handbook/product/ux/category-maturity/category-maturity-scorecards/) that are based on user research. Now, we start by considering the Job to be Done (JTBD) that our users need to accomplish, and we gather user feedback to rate the entire experience -- not just functionality, but usability, too. \n\nWe've learned some amazing things through this new approach, and those learnings have enabled us to make [valuable recommendations](https://gitlab.com/gitlab-org/gitlab/-/issues?label_name%5B%5D=cm-scorecard-rec) to improve our product experience in areas like Code Review, Logging, and Issue Management. We have several additional scorecard initiatives underway, which means that our focus on creating an exceptional experience will only continue to grow. \n\nSo often, UX departments complain that they have to fight for executives to acknowledge the importance of usability on business outcomes. In this case, refining category maturity started as an idea from [Sid](https://gitlab.com/sytses), our CEO. This is honestly amazing! It's the kind of user-centered focus that UX teams get really excited about.\n\nAs the person who leads UX at GitLab, it was awesome for me to watch our cross-functional team immediately get on board. 
Because measuring product maturity isn't an industry standard, through our value of [Iteration](https://handbook.gitlab.com/handbook/values/#iteration) it took us some time (and a false start) to determine the right approach. Fortunately, Product leadership was both enthusiastic and patient, UX Researchers were persistent in taking feedback and making methodological refinements, and Product Designers were courageous in trying something they've never done before. Even better: Technical Writing has been involved, too, as we've identified documentation improvements that will refine our product maturity. \n\nThis was truly a team effort, and I appreciate everyone who participated. 🤝\n\n## Our design system evolved from an idea into reality\n\nWhen I joined GitLab in early 2019, our design system, [Pajamas](https://design.gitlab.com/), was a scrappy project that the design team was working hard to get off the ground. We had designed a set of 28 single-source-of-truth components and were working hard to build them into [GitLab UI](https://gitlab.com/gitlab-org/gitlab-ui), our Vue-based component library.\nWe now have a robust design library that's implemented in Figma, and a large collection of SSOT Vue components are available to use in the product, too. Even more exciting: We're just finishing with implementing our 8 most impactful components across the entire product UI (buttons, alerts, dropdowns, modals, tabs, popovers, and tooltips), which will result in better performance and consistency when we're done. (We're so close!)\n\nMost amazing to me was watching product designers and technical writers jump in to do much of this component migration work themselves. This was no small feat, because frontend development is not something that many of us are deeply skilled at. But, apparently we're both tenacious and brave, because we did the work anyway (with lots of help from our Frontend Engineers and the awesome documentation that our UX Foundations team created). 
In the process, we've gotten to know both our product features (which are complex) and our code base (which is also complex) even better, which makes us more effective in our day-to-day jobs.\n\nSpeaking of our UX Foundations team, this is another related success. At the beginning of 2020, we got the budgetary support to create a team that is dedicated solely to maintaining our design system and tooling. The team may be small, but its impact certainly isn't. They've already made some big improvements to things like:\n\n* **Improving tooling for designers:** The move to Figma allows for greater collaboration, as well as community contributions. Sketch is only available on Mac platforms and there are no real-time collaboration features. Figma allows us to provide a UI Kit that is available across platforms, while being available for community contributors to use for free. It also promotes collaboration through its use of real-time editing capabilities and version history. We were able to streamline developer handoff by simply linking to the design file, reducing the need for additional plugins such as Sketch Measure.\n* **Making our color palette consistent and accessible:** We addressed color contrast for accessibility and normalized the palette across hues, so that we can better systematize variable use throughout the UI.\n* **Improving consistency in our icons:** With the creation of our own [SVG Library](http://gitlab-org.gitlab.io/gitlab-svgs/), we've been working to [deprecate our use of Font Awesome](https://gitlab.com/groups/gitlab-org/-/epics/2331) throughout the year. With the help of the Frontend department, we've closed out 156 out of 168 issues related to this effort.\n* **Moving towards more accessible workflows:** Near the end of the year, we've started focusing more on building accessibility standards into our workflows. 
We are currently auditing and updating our [voluntary product accessibility template](https://design.gitlab.com/accessibility/vpat), as well as [incorporating accessibility audit guides](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/2158) into Pajamas.\n\n## Actionable insights\n\nUser research is so incredibly valuable... when you take action on it. But it can be a challenge for research teams to condense their powerful findings into small but compelling insights and then track those insights to determine whether they actually make it into the product.\n\nIn the second half of this year, our user research team made two big strides in this area. First, we started using [Dovetail](https://dovetailapp.com/) to help us more easily analyze research data to find meangingful insights and share it collaboratively with Product Managers and Product Designers (and anyone else who may be interested). But, they took this a step farther by also beginning to [track actionable insights](https://about.gitlab.com/handbook/product/ux/performance-indicators/#actionable-insights) as a performance indicator.\n\nThe considerable effort it took to get both of these programs in place will be worth it as we watch our research efforts result in an even better product.\n\n## Beautifying our docs\n\nComplex products like GitLab require high-quality documentation. Some things you just can't (and shouldn't) communicate through the UI, so users rely on great docs to get their daily jobs done.\n\nOur Technical Writing team (many of whom have been with GitLab less than a year) worked hard to improve our docs site during 2020, including:\n\n- Several UX research projects to discover - and fix! - problems users encounter when using the docs site.\n- A \"Beautification\" effort that focused on an updated visual design. Our 2020 GitLab Contribute event included many rapid improvements to the docs site, and we made many more afterward. 
(Did you notice?)\n- Ongoing content improvements, including making our docs more consistent, findable, detailed, and easier to read.\n- Adding (a lot of) metadata information to product docs to help connect content contributors with Technical Writers.\n- Coding innovations for automation, such as grammar checking with Vale, a linter, to automatically catch errors before they’re merged.\n\nWe’ve also completed work on a Docs Strategy roadmap to drive even more improvements in the upcoming months.  \n\n## And so much more...\n\n* GitLab Design Talks: In this fun video series, watch designers, technical writers, researchers, and product managers talk about [Iteration](https://www.youtube.com/playlist?list=PL05JrBw4t0KpgzLWbRCXf8o7iap-uoe7o) and [Collaboration](https://www.youtube.com/playlist?list=PL05JrBw4t0KrER807JktsL-addVZa4N0-) at GitLab. (Special thanks to host [Nick Post](https://gitlab.com/npost)!)\n* UX Showcase: See [100+ videos](https://www.youtube.com/playlist?list=PL05JrBw4t0Kq89nFXtkVviaIfYQPptwJz) highlighting exciting UX work happening across GitLab. 
I learn something new everytime I watch one of these.\n* Blog posts: Read about a variety of topics we were thinking about in 2020, including:\n    * [Designing in an all-remote company](https://about.gitlab.com/blog/designing-in-an-all-remote-company/)\n    * [Running an asynchronous sketching workshop for UX](https://about.gitlab.com/blog/async-sketching/)\n    * [Synchronous collaboration as a remote designer at GitLab](https://about.gitlab.com/blog/synchronous-collaboration-as-a-remote-designer-at-gitlab/)\n    * [A tale of two file editors](https://about.gitlab.com/blog/a-tale-of-two-editors/)\n    * [How holistic UX design increased GitLab.com free trial signups](https://about.gitlab.com/blog/how-holistic-ux-design-increased-gitlab-free-trial-signups/)\n    * [Improving iteration and collaboration with user stories](https://about.gitlab.com/blog/how-we-utilize-user-stories-as-a-collaborative-design-tool/)\n    * [Designing incident management from scratch](https://about.gitlab.com/blog/designing-alerts-and-incidents/) \n    * [Why GitLab is the right design collaboration tool for the entire team ](https://about.gitlab.com/blog/why-gitlab-is-the-right-design-collaboration-tool-for-the-whole-team/)\n\nAgain, the GitLab UX team does amazing work every single day, and there is no way to capture all of that effort in a single blog post. As this year wraps up, I hope you personally take time to think about your own successes and the impact they had on our fast-moving company. \n\nI also hope you know that we value every one of you. You are appreciated. 💜\n\n{::options parse_block_html=\"true\" /}\n\n\u003Cdiv class=\"panel panel-gitlab-purple\">\n  \u003Cp class=\"panel-heading\">\u003Cstrong>One more thing...\u003C/strong>\u003C/p>\n\u003Cdiv class=\"panel-body\">\n\n\u003Cp>The final 2020 highlight I wanted to ensure is here was Christie Lenneville's own promotion to be GitLab's first \u003Cstrong>Vice President of User Experience (UX)\u003C/strong>. 
I knew that as both the author of this article, and as a humble (and great) leader she'd be hesitant to add this herself. But it's not only a recognition of her achievements and her potential. VP-level leadership of UX at GitLab should \u003Ci>also\u003C/i> be a signal of how important UX is to our organization and to our community. And it should indicate that usability is an important differentiator for GitLab, and a critical part of our company's strategy. Congratulations again, Christie!\u003C/p>\n\n&mdash; Eric Johnson, Chief Technology Officer\n\n\u003C/div>\n\u003C/div>\n\n{::options parse_block_html=\"false\" /}\n",[700,1698,9,3117],"research",{"slug":3119,"featured":6,"template":680},"gitlab-ux-2020-year-in-review","content:en-us:blog:gitlab-ux-2020-year-in-review.yml","Gitlab Ux 2020 Year In Review","en-us/blog/gitlab-ux-2020-year-in-review.yml","en-us/blog/gitlab-ux-2020-year-in-review",{"_path":3125,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3126,"content":3132,"config":3139,"_id":3141,"_type":14,"title":3142,"_source":16,"_file":3143,"_stem":3144,"_extension":19},"/en-us/blog/gitlab-vue-one-year-later",{"title":3127,"description":3128,"ogTitle":3127,"ogDescription":3128,"noIndex":6,"ogImage":3129,"ogUrl":3130,"ogSiteName":667,"ogType":668,"canonicalUrls":3130,"schema":3131},"How we do Vue: one year later","How we, at GitLab, write VueJS, one year later.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680321/Blog/Hero%20Images/vue-title.jpg","https://about.gitlab.com/blog/gitlab-vue-one-year-later","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we do Vue: one year later\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2017-11-09\",\n      }",{"title":3127,"description":3128,"authors":3133,"heroImage":3129,"date":3135,"body":3136,"category":743,"tags":3137},[3134],"Jacob 
Schatz","2017-11-09","\n\nIt's been a while since [we wrote about Vue](/blog/why-we-chose-vue/). We've been using Vue for over a year now and life has been very good. Thanks [@lnoogn](https://twitter.com/lnoogn) for reminding me to write this article!\n\n\u003C!-- more -->\n\nOur situation reminds me of a quote about Scala from [\"Is Scala slowly dying?\"](https://www.reddit.com/r/scala/comments/2hw0bp/is_scala_slowly_dying/) Someone once said:\n\n> Scala people don't have time for redditing and blogging, they're busy getting crap done.\n\nWhich is exactly what we've been doing. Like Scala, Vue works really, really well, when used properly. It turns out Vue isn't a buzzword, Vue is a workhorse. A lot of our problems have been solved, by us and others. We still have problems but, we now have a reproducible \"way to write Vue.\" We don't adopt every new idea out there, but we have changed a few things since we last spoke.\n\nSince that last post, we published a [very extensive Vue style guide](https://docs.gitlab.com/ee/development/fe_guide/vue.html), after which Vue also put out a [style guide](https://vuejs.org/v2/style-guide/), [taking inspiration from ours](https://github.com/vuejs/eslint-plugin-vue/issues/77#issuecomment-315834845). The style guide has been updated several times as we discover better ways to write Vue. Here are some of the things we discovered.\n\n## Just use VueX\n\nWe discovered that [VueX](https://vuex.vuejs.org/) makes our lives easier. If you are writing a medium to large feature, use VueX. If it's a tiny feature, you might get away without it. We made the mistake of not using VueX for a large feature. 
We wrote a [multi-file editor](https://gitlab.com/gitlab-org/gitlab-ce/issues/31890) (WIP) to replace our current repo file view, to allow easy editing of multiple files.\n\n![multi-file-editor.png](https://about.gitlab.com/images/vue_2017/multi-file-editor.png){: .shadow}\n\nIn the beginning we did not use VueX for this feature and instead used the store pattern. The Vue docs talk about the [store pattern](https://vuejs.org/v2/guide/state-management.html#Simple-State-Management-from-Scratch), which works well when you are committed to strictly keeping to the pattern. We've found that you are better off spending your time with VueX instead. While VueX is initially more verbose, it is much more scalable, and will save you tons of time in the long run. Our mistake happened when we changed the data in multiple places. In VueX you are forced to change the data in one central place. If you don't do this, you will wind up chasing unexpected bugs around.\n\n## Write high quality code\n\nEven though VueJS and VueX are both wonderful, it is still possible (as with any code) to write bad Vue code. While the code may work, your longevity and scalability may suffer. Performance can suffer. With Vue, it makes it so easy to have what seems like working, perfect code because Vue is so simple to write. Longevity problems can mean that your code initially works, but you (and others) will have a hard time trying to update the code. Performance problems might not crop up with small data sets, but will with larger ones. Code can get messy. Your code can get smelly. Yes, even with Vue, you can have [code smell](https://en.wikipedia.org/wiki/Code_smell).\n\nWhen you add something to the `data` object or the `store` for Vue to keep track of, Vue will recursively walk down your data object and keep track of everything. If your data is super hierarchical and just large in general, and you are changing things often (like maybe on `mousemove`), then you can create jank. 
It's not bad to have Vue observe large data sets, but just confirm that you do in fact need the data you are watching to be reactive. It's easy with Vue to just make everything reactive, when it might not need to be.\n\nThat's why we are very strict when anyone writes Vue code. They must [follow our documentation](https://docs.gitlab.com/ee/development/fe_guide/vue.html). They must also only write Vue when it is necessary and not write it [when it is overkill](https://docs.gitlab.com/ee/development/fe_guide/vue.html#when-not-to-use-vue-js).\n\nAll of our new Vue code follows the [Flux architecture](https://facebook.github.io/flux/). VueX also follows Flux, which is part of the reason we use VueX. You can use the previously mentioned \"store pattern,\" but VueX is a better choice because it enforces all of the rules. If you go rogue, you will wind up enforcing the rules yourself, and you will probably make mistakes. The less you put on your plate, the better. A good example of a well-written Vue app is the [registry image list](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/14303).\n\n### I want to use jQuery with Vue\n\nDuring new development, this question kept popping up.\n> Is it ever OK to mix jQuery with VueJS?\n\nWe are not talking about using [Select2](https://select2.org/), which is a jQuery library. We are talking about the need to query the DOM. We had discussions about using jQuery and the following was proposed:\n\n> Using jQuery is OK, but only for querying.\n\nAt first I had several discussions about using jQuery with Vue. Some had said it might be OK, but only in read-only (querying) situations. However, after doing the research, we found that it is **not** a good idea to use jQuery with Vue. There will always be a better solution. 
We found that if you ever find yourself needing to query to DOM within a Vue architecture, then you are doing something wrong.\n\nIf one were to hypothetically use jQuery for only the tiniest querying situations, one would have to quantify those situations. You should instead swear off querying the DOM when in Vue.\n\nInstead of querying, you will find that using the `store` in combination with the server-side code is usually a much simpler answer. The server can provide validity to your data that you cannot provide on the client side. For the most part, we find that the less we have to fool with the data on the client side the better. That's not to say it's never OK to modify the data on the client side, but that it isn't usually the cleanest solution. At GitLab we use querying only to grab endpoints from the `data` attribute of our main element, but we don't use jQuery, we use `el.dataset`. At GitLab, we (the Frontend people) talk with the Backend people to ensure the structure of the data we will be consuming. In that way, both the Frontend team and the Backend team can be in control.\n\n#### Example situation:\n\nCheck out this issue:\n\n![issue](https://about.gitlab.com/images/vue_2017/issue.png){: .shadow}\n\nWe now render all issue comments in Vue. An example of a situation where we wanted to use jQuery was during the rewrite of the edit-the-last-user-comment feature. When someone presses that `up` key on their keyboard from an empty new comment `textarea` (at the very bottom of the page) we allow them to edit the last comment they created, just like in Slack. Not just the last comment, but the last comment *they created*. We marked the last user comment in the picture in red. Of course there is a time crunch. Then someone might say,\n\n> Can't we just do a quick solution here and fix it later?\n\nSurely you *could* query the DOM for this. A better solution, in this case, is to let the backend developers mark the last user comment in the JSON they return. 
Backend developers have direct access to the database, which means they may be able to optimize the code. Then no client-side work has to be done at all, in this case. Someone has to do the work to mark the last user comment. In this case the solution is just finding the right person for the job. Once you have that data from the server, the comment is in your `store`, ready for your easy access. You can do anything now. The world is your oyster.\n\nIf you find yourself querying the DOM, \"just this one time\" 😉, there is always a better solution.\n\n### The proper Vue app\n\nEvery Vue bundle needs one store, one service, and always has one entry point. Your entry point component is the only container component and every other component is presentational. All this information is in our Vue docs.\n\nYou can start out with a single `div`.\n\n```html\n\u003C!--HAML-->\n.js-vue-app{ data: { endpoint: 'foo' }}\n\n\u003C!--HTML-->\n\u003Cdiv class=\"js-vue-app\" data-endpoint=\"foo\">\u003C/div>\n```\nYou can pass your endpoints in through the data attributes. Vue can then call these endpoints with an HTTP client of your choice.\n\nYou don't want to do any URL building in client-side JavaScript. Make sure you pass in all your server-built URLs through endpoints. When writing Vue it's important to let the server do what it should.\n\n## Improve performance\n\nWe recently rewrote our issue comments in Vue. The issue comments were previously written in Haml, jQuery, and Rails. We had a bottleneck because we were not loading the comments asynchronously. A quick solution is to load comments via ajax and populate comments after the page loads. One way to make a page load faster is to not block the page with heavy items and load them after.\n\n![comments.png](https://about.gitlab.com/images/vue_2017/comments.png){: .shadow}\n\nWhat we love is that one day we turned on the new comments and some people didn't know that we had refactored it. 
As a result of the refactor our issue pages load much faster, and there is less jank.\n\nLoading the comments on the issue page is now streamlined and now individual issues load much faster. In the past, an issue page could have tens of thousands of event listeners. Our previous code was not properly removing and keeping track of event listeners. Those massive event listeners (along with other problems) created jank, so scrolling the page was choppy with many comments. We removed jQuery and added in Vue and focused on improving the performance. You can clearly see and feel that the page is much faster. However, our work to improve the performance has just begun. This rewrite sets the foundation for performance improvements that are easier to write, because the code is much more maintainable. Previously the code was hard to maintain. Now the issue comments code is properly separated and \"componentized.\"\n\nWith these new improvements, as well as other parallel improvements, e.g. loading images on scroll, we were able to make the page load and perform faster.\n\n![speed.png](https://about.gitlab.com/images/vue_2017/speed.png){: .shadow}\n\nRefactoring is that word that a new, super-green developer mentions on day one when they suggest to rewrite everything in Angular. That hasn't happened at GitLab. Our frontend devs tend to be very conservative, which is a very good thing. Which begs the question, why does it seems like [everyone is always refactoring](https://reasonml.github.io/community/blog/#reason-3)? What are they trying to achieve? I can only speak for GitLab. What do we want to achieve with a refactor? In reality it's going to cost a lot of money. The costs are:\n\n1. Cost of doing the refactoring.\n1. Cost of testing the change.\n1. Cost of updating tests and documentation.\n\nYou also have more risk:\n\n1. Risk of introducing bugs.\n1. Risk of taking on a huge task that you can't finish.\n1. 
Risk of not achieving the quality/improvements you intended.\n\nOur goals are:\n\n**Goal #1**: Make the code more maintainable. We want to make the process of adding new features easier. In the long term this refactor will save us time, but it takes a significant amount of time to recoup the time spent refactoring. The hard truth may be that a refactor usually does not save you time, but can save you stress.\n\n**Goal #2**: What it can do, if done right, is make developers happy. Nothing gives your team more horsepower than a happy, excited coder. A stressed-out coder will want to stop coding; an excited coder will not want to stop. A happy coder saves the most time.\n\nTo meet our goal our next step is to refactor the merge request comments section. Our merge request comments are massively slow for merge requests with lots of comments. The comments become slower and start to be less responsive at around 200 comments. The diffs are slow as well. There are a ton of reasons for this, one of which is that JavaScript is causing multiple reflows that take tons of time. We could refactor this and have already put in a fix, but this isn't a long-term solution.  In the case of a huge MR, there was code that was causing a reflow that [takes over eight seconds](https://gitlab.com/gitlab-org/gitlab-ce/issues/39332)! This is now fixed. In this [image](https://gitlab.com/gitlab-org/gitlab-ce/uploads/e18856a1544d4d0e6420d11fd0479af7/ss__2017-10-20_at_1.41.04_PM.png)  you can see there is other stuff slowing things down. Clearly there is a lot of work to do here. Our biggest problem is that the code is not maintainable, which means that fixes take longer. A refactor into Vue will provide some great initial speed improvements, and lay the groundwork for easier improvements in the future.\n\nThere is so much work to do at GitLab. 
If you want to be a part of exploring the massive catacombs of GitLab and writing awesome code and if you are interested in helping out our Frontend team, then [apply](https://handbook.gitlab.com/job-families/engineering/development/frontend/).\n",[3138,9],"frontend",{"slug":3140,"featured":6,"template":680},"gitlab-vue-one-year-later","content:en-us:blog:gitlab-vue-one-year-later.yml","Gitlab Vue One Year Later","en-us/blog/gitlab-vue-one-year-later.yml","en-us/blog/gitlab-vue-one-year-later",{"_path":3146,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3147,"content":3153,"config":3158,"_id":3160,"_type":14,"title":3161,"_source":16,"_file":3162,"_stem":3163,"_extension":19},"/en-us/blog/gitlab-welcomes-janelle-romano-and-patty-molthen-to-federal-advisory-board",{"title":3148,"description":3149,"ogTitle":3148,"ogDescription":3149,"noIndex":6,"ogImage":3150,"ogUrl":3151,"ogSiteName":667,"ogType":668,"canonicalUrls":3151,"schema":3152},"GitLab welcomes Janelle Romano and Patty Molthen to Federal Advisory Board","The new board members come onboard as GitLab continues to identify opportunities to drive new technology adoption in the public sector.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663993/Blog/Hero%20Images/2018-developer-report-cover.jpg","https://about.gitlab.com/blog/gitlab-welcomes-janelle-romano-and-patty-molthen-to-federal-advisory-board","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab welcomes Janelle Romano and Patty Molthen to Federal Advisory Board\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-08-03\",\n      }",{"title":3148,"description":3149,"authors":3154,"heroImage":3150,"date":3155,"body":3156,"category":675,"tags":3157},[950],"2022-08-03","[GitLab](/solutions/public-sector/) Federal, LLC, provider of The One DevOps Platform for the public sector, is excited to 
announce the appointment of Janelle Romano and Patty Molthen to its Federal Advisory Board. \n\n“GitLab is thrilled to welcome Janelle Romano and Patty Molthen, two seasoned leaders, to the Federal Advisory Board as we continue to demonstrate the value of The One DevOps Platform within the public sector,” said Bob Stevens, Area Vice President, Public Sector at GitLab Federal, LLC. “We look forward to their contributions to GitLab as we continue identifying opportunities to drive adoption of new technologies and enable innovation and efficiency within the public sector.”  \n\nRomano joins the GitLab Federal Advisory Board after nearly three decades in government, first operating in, and then leading research, development, and operational organizations. She currently serves as Vice President of Cyberspace Operations at CACI, following her role as Chief of Critical Networks Defense at the National Security Agency. \n\n“GitLab's mission to deliver a single, open core application that streamlines operations while allowing everyone to contribute is the type of driver that the government needs to help it achieve rapid iteration, integration, and innovation,” said Romano. “I am incredibly excited to work with GitLab to enable our servicemembers, intelligence professionals, and civil servants to unlock their own potential while delivering critical applications needed to achieve mission outcomes.” \n\nFor more than 20 years, Molthen served as an independent consultant to firms that work closely with the Department of Veterans Affairs, Military Health, the Defense Health Agency, Federal Healthcare Systems, and the Department of Defense. She is the owner of CM2 Group, a small consulting company that specializes in industries such as aerospace, transportation, and military defense. Molthen has been a member of several organizations in the Washington, D.C. 
area, including Women in Defense, Northern Virginia Technology Council – Acquisition team, ACT-IAC Small Business Alliance and Acquisition and Emerging Technologies Community of Interest panels, and a member of NDIA.\n\n“I was drawn to GitLab due to its mission of enabling organizations to ship secure products more efficiently. I look forward to contributing my expertise in healthcare informatics, IT policy, and contracting support to benefit our servicemembers and military healthcare as a member of this prestigious board,” Molthen said. \n\nRomano and Molthen join existing board members August Schell President John Hickey, Mountain Wave Ventures Partner Roger Cressey, Efrus Federal Advisors Founder Rob Efrus, and Buck Consulting Group CEO Nick Buck. In line with the company’s commitment to its Diversity, Inclusion, and Belonging [value](https://handbook.gitlab.com/handbook/values/#diversity-inclusion), GitLab is proud to welcome these two accomplished women as advisors.",[675,9],{"slug":3159,"featured":6,"template":680},"gitlab-welcomes-janelle-romano-and-patty-molthen-to-federal-advisory-board","content:en-us:blog:gitlab-welcomes-janelle-romano-and-patty-molthen-to-federal-advisory-board.yml","Gitlab Welcomes Janelle Romano And Patty Molthen To Federal Advisory Board","en-us/blog/gitlab-welcomes-janelle-romano-and-patty-molthen-to-federal-advisory-board.yml","en-us/blog/gitlab-welcomes-janelle-romano-and-patty-molthen-to-federal-advisory-board",{"_path":3165,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3166,"content":3171,"config":3176,"_id":3178,"_type":14,"title":3179,"_source":16,"_file":3180,"_stem":3181,"_extension":19},"/en-us/blog/gitlabs-2018-product-vision",{"title":3167,"description":3168,"ogTitle":3167,"ogDescription":3168,"noIndex":6,"ogImage":2991,"ogUrl":3169,"ogSiteName":667,"ogType":668,"canonicalUrls":3169,"schema":3170},"GitLab's 2018 Product Vision: Prototype demo","Take an early look at where we're heading this 
year.","https://about.gitlab.com/blog/gitlabs-2018-product-vision","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 2018 Product Vision: Prototype demo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Pundsack\"}],\n        \"datePublished\": \"2018-02-26\",\n      }",{"title":3167,"description":3168,"authors":3172,"heroImage":2991,"date":3173,"body":3174,"category":299,"tags":3175},[1796],"2018-02-26","\nAt GitLab, we believe there's something magical about a video demo as a way to\n[convey strategic\nvision](/handbook/product/index.html#communicating-product-vision). We've\ncreated this video to internally align where we're going; and since we're\n[transparent by\ndefault](https://handbook.gitlab.com/handbook/values/#transparency), you get to see\nit as well!\n\n\u003C!-- more -->\n\nSo sit back, [watch the video](https://youtu.be/RmSTLGnEmpQ), follow\nalong with [the\npresentation](https://docs.google.com/presentation/d/19dZ1Y4us11B_96YoXvgQL4aBXPy2iNYRId0vmTulnnQ/edit?usp=sharing),\nor read below for a lightly edited transcript of the video. You can also [play\nwith the prototype](https://framer.cloud/UaofH/index.html) yourself (click the\nheader to move to the next page, click the left sidebar to move back) or\n[follow our progress](/direction/).\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/RmSTLGnEmpQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Introduction\n\nToday I’m going to talk about GitLab’s product vision for 2018. Specifically,\nI’m going to show a prototype of what the product might look like.\n\nAs you can imagine with a product vision as extensive as ours, there’s a lot to\ncover. So if you only remember three things from this presentation, know that:\n\n1. We’re going after the **complete DevOps** lifecycle, and specifically,\n2. 
we want **Operations and Security** to use GitLab as a primary interface, and\n3. a [single application](/topics/single-application/) covering this entire scope brings emergent benefits, specifically that people can work **concurrently**, on the same data, with the same interface.\n\nSo hopefully it’s\n[obvious](/blog/devops-strategy/)\n[by](/blog/gitlab-raises-20-million-to-complete-devops/)\n[now](/blog/from-dev-to-devops/) that we’re going\nfrom covering the development lifecycle to covering the entire DevOps lifecycle.\n\n\u003Cimg src=\"/images/blogimages/2018-product-vision/dev2devops.png\" alt=\"Dev to DevOps\" style=\"width: 700px;\"/>{: .shadow}\u003Cbr/>\n*\u003Csmall>From Dev to DevOps\u003C/small>*\n\nBut traditional DevOps tools only focus on the intersection between Dev and Ops,\nand GitLab is going to deliver a complete scope for both Dev and Ops. In\nparticular, that means we’re not just looking at how Developers can get their\ncode into production, but how Operations can then monitor and manage those\napplications and underlying infrastructure. A big milestone for GitLab will be\nwhen Operations people log into GitLab every day and consider it their main\ninterface for getting work done.\n\nBut even that’s not really sufficient, as we’re redefining what the scope of\nDevOps even is; we’re also covering Security and Business needs (such as project\nmanagers). Rather than coming up with some crazy DevSecBizOps name, we’re just\ncalling it DevOps, and putting it all into a single application.\n\n\u003Cimg src=\"/images/blogimages/2018-product-vision/devsecbizops.png\" alt=\"DevSecBizOps\" style=\"width: 700px;\"/>{: .shadow}\u003Cbr/>\n*\u003Csmall>No DevSecBizOps; a single application for DevOps\u003C/small>*\n\nAnd with that, each group gets an experience tailored to their needs, but shares\nthe same data and interface as everyone else, so collaboration is easy. 
Imagine\nan Ops person finds an issue in production, drills down to find the application\nwith the problem, and sees that a recent deploy caused the problem.\nSimultaneously, a dev gets alerted that their recent deploy triggered a change\nin production, goes to the merge request and sees the performance change right\nthere. When Dev, Ops, and Security talk, they’re looking at the same data, but\nfrom their own point of view.\n\nNow the scope we’re going after is quite large, with a lot of new categories\nbeing introduced this year. I won’t go into all of these today, but instead I\nwant to focus on a couple flows that paint a picture of how this could look.\n\n\u003Cimg src=\"/images/blogimages/2018-product-vision/product-categories.png\" alt=\"Product categories\" style=\"width: 700px;\"/>{: .shadow}\u003Cbr/>\n*\u003Csmall>New product categories in 2018\u003C/small>*\n\n## Interactive prototype\n\nFor this, I’ll switch over to an [interactive\nprototype](https://framer.cloud/UaofH/index.html). *[Note: if you want to try it for yourself, click the header to\nmove to the next page, click the left sidebar to move back.]* While this may\nlook like a fully functioning instance of GitLab, it is just a demo and many of\nthese features have not been implemented yet.\n\n### Development flow\n\nI’ll start by showing a merge request.\n\n\u003Cimg src=\"/images/blogimages/2018-product-vision/development.png\" alt=\"Developer Flow\" style=\"width: 700px;\"/>{: .shadow}\u003Cbr/>\n*\u003Csmall>Developer Flow: Merge Request\u003C/small>*\n\nOne of the new elements we see is a “Test summary” which shows a deeper\nunderstanding of your test results. 
Using standard JUnit XML output, we can tell\nexactly which tests fail, and provide that information in a nice summary format.\n\nWe also see links to the binary artifacts and container images associated with\nthis merge request.\n\nAs I scroll down, we see a lot of information about the extensive collection of\ntests we’ve run on the code.\n\nFirst we see the code quality section, which we’ve had for a while.\n\nThen the relatively new Security section with static [application security\ntesting](/topics/devsecops/) to find vulnerabilities in your *code* or your code's dependencies,\ndynamic application security testing to find vulnerabilities while actually\n*running your app*, and an analysis of any vulnerabilities in any of your\nunderlying Docker layers.\n\nWe’ll also show how your application performance has changed.\n\nAnd lastly, we’ll check your dependencies for any violations of your company’s\nlicense policy.\n\nNow, this is a LOT to cover for every merge request, so we have separate issues\nto redesign for all this new information, but I wanted to show it all to you now\nto see how much we’re doing automatically for you.\n\nDown below all of that is an enhanced code diff that highlights any code you\nshould pay attention to because of code quality concerns or missing test\ncoverage.\n\n\u003Cimg src=\"/images/blogimages/2018-product-vision/code-coverage.png\" alt=\"code coverage\" style=\"width: 700px;\"/>{: .shadow}\u003Cbr/>\n*\u003Csmall>Code coverage and alerts\u003C/small>*\n\nThis is all part of the “shift left” movement, where important quality,\nsecurity, and performance tests that may have once been run manually, if at all,\nand usually much later in the development lifecycle, are now being run\nautomatically as soon as the first code is written.\n\nThere’s a lot more planned, but this is a good idea of the direction we’re going\nin to help Developers get their ideas into production faster.\n\n### Operations flow\n\nBut that only covers part of 
our vision, because there’s also\nthe Operations point of view. And a big milestone for our DevOps vision is when\nOperations start using GitLab as their primary interface.\n\n\u003Cimg src=\"/images/blogimages/2018-product-vision/operations-health.png\" alt=\"Operations health\" style=\"width: 700px;\"/>{: .shadow}\u003Cbr/>\n*\u003Csmall>Operations flow: operations health dashboard\u003C/small>*\n\nThere’s a long way to go, but here we’re answering the question, “How is\nproduction doing?” In this case we’re seeing a group with four projects in it, and\na quick green/yellow/red indicator of how those projects are doing. We’ve put a\ngraph of the Apdex score there to represent the one-metric-to-watch.\n\nBelow the projects is a view of the cluster, including CPU and memory usage,\npossibly indicating when you need to scale up or down the cluster size.\n\nNow, if there was an indication that something was wrong, you’d be able to drill\ndown and see more details and rectify the situation.\n\nBut that’s only the first-level understanding of operations. I mean, if we’ve\ngot the data about how things are doing, why not proactively alert you to the\nproblem? Well, that’s the second level, and a natural step. But we’re not going\nto stop there. The third level is to automatically detect *and resolve* any\nissues. If your app needs more resources, just autoscale it. If you then hit a\nlimit on the cluster, well, add a node to the cluster automatically. The\nOperations experience then should really just be that I go to work in the morning\nand see an email summary of what has happened, without me having to do anything.\n\nBut autoscaling is just scratching the surface, as Operations involves a lot\nmore, from application, infrastructure, and network monitoring, to security\npatches. 
After we’ve got this breadth as a structure, we look forward to the\ncustomer feature requests.\n\n### Security flow\n\nSo that covers Dev and Ops, but we’ve got a lot of security\nfeatures in the product now. How about treating Security folks as first-class\ncitizens and giving them their own Security Audit view?\n\n\u003Cimg src=\"/images/blogimages/2018-product-vision/security-audit.png\" alt=\"Security audit\" style=\"width: 700px;\"/>{: .shadow}\u003Cbr/>\n*\u003Csmall>Security flow: security audit\u003C/small>*\n\nThis is your one-stop-shop to see what security vulnerabilities have been\ndetected across the group, showing any automatic or manual actions taken to\naddress the vulnerabilities, and of course letting you click into details.\n\nIn the top left we’re reporting an overall success rate in hitting our own\ninternal SLAs for security vulnerabilities.\n\n### Full circle\n\nLet’s drill down on one of these vulnerabilities.\n\n\u003Cimg src=\"/images/blogimages/2018-product-vision/automatic-updates.png\" alt=\"Automatic updates\" style=\"width: 700px;\"/>{: .shadow}\u003Cbr/>\n*\u003Csmall>Automatic updates for security vulnerabilities\u003C/small>*\n\nWe see that the GitLab Bot automatically created a merge request to upgrade one\nof our dependencies because it noticed that a new version was released.\n\nSince the tests all pass, and of course the merge request fixed the\nvulnerability, the merge request was automatically merged by the Bot as well.\n\nBut, to bring it full circle, l’m showing here that after merging, the CI/CD\npipeline started deploying automatically to Production. 
I mean, why leave a\nknown, fixable security vulnerability live any longer than it needs to, right?\n\nBut, in this case, even though all tests passed, we still saw the error rate\njump to more than five percent, so we automatically stopped the rollout process, and\nactually rolled back to the last-known good version immediately.\n\nThen, the Bot detects this and automatically reverts the merge request so we can\nleave `master` in a good state.\n\nPhew.\n\n## Summary\n\nSo, wrapping it up:\n1. We’re going after the **complete DevOps** lifecycle,\n2. we want **Operations and Security** to be our new favorite users, and\n3. we want teams working **concurrently**.\n\nAnd that’s the GitLab Product Vision for 2018!\n",[9,677],{"slug":3177,"featured":6,"template":680},"gitlabs-2018-product-vision","content:en-us:blog:gitlabs-2018-product-vision.yml","Gitlabs 2018 Product Vision","en-us/blog/gitlabs-2018-product-vision.yml","en-us/blog/gitlabs-2018-product-vision",{"_path":3183,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3184,"content":3190,"config":3196,"_id":3198,"_type":14,"title":3199,"_source":16,"_file":3200,"_stem":3201,"_extension":19},"/en-us/blog/gitlabs-global-compensation-calculator-the-next-iteration",{"title":3185,"description":3186,"ogTitle":3185,"ogDescription":3186,"noIndex":6,"ogImage":3187,"ogUrl":3188,"ogSiteName":667,"ogType":668,"canonicalUrls":3188,"schema":3189},"GitLab’s Global Compensation Calculator: The next iteration","We released a new version of our Compensation Calculator in January – here’s what that means for new and existing GitLab team-members.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667966/Blog/Hero%20Images/global-compensation-calculator-iteration.jpg","https://about.gitlab.com/blog/gitlabs-global-compensation-calculator-the-next-iteration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s Global Compensation 
Calculator: The next iteration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brittany Rohde\"}],\n        \"datePublished\": \"2018-03-23\",\n      }",{"title":3185,"description":3186,"authors":3191,"heroImage":3187,"date":3193,"body":3194,"category":299,"tags":3195},[3192],"Brittany Rohde","2018-03-23","\n\nWe know many of you have thoughts about our [Compensation Calculator](/handbook/total-rewards/compensation/compensation-calculator/#the-compensation-calculator)! We see your comments on Hacker News; we are listening and continually working on improving it. In line with our value of [iteration](https://handbook.gitlab.com/handbook/values/#iteration), we have made additional changes to our Compensation Calculator. In January 2018, we released a new version to align the calculator closer to market rates, and adjust all current team members’ pay to be in line with the outputs of the iterated version. Here’s how it works.\n\n \u003C!-- more -->\n\n## What is our new formula?\n\nYour compensation = [SF benchmark](#sf-benchmark) x (0.7 x (max (0.2, [Rent Index](#rent-index) + [Hot Market Adjustment](#hot-market-adjustment)) / 1.26) + 0.30) x [Level Factor](#level-factor) x [Experience Factor](#experience-factor) x [Contract Type Factor](#contract-type-factor) x [Country Factor](#country-factor)\n\n### SF benchmark\n\nThis is the employee salary at the 50th percentile for the role in San Francisco (SF), which we determine using various sources of market data including [Comptryx](http://www.comptryx.com/).\n\n### Rent Index\n\nThis is taken from [Numbeo](https://www.numbeo.com/cost-of-living/), which expresses the ratio of cost of rent in many metro areas. Since we are using San Francisco benchmarks, we divide by 1.26 to normalize the rent index to San Francisco. A minimum Rent Index of 0.2 is applied so no one is paid less than 41 percent of San Francisco's market.\n\nWe multiply the Rent Index by 0.7 and then add 0.3, so the sum would equal 1 (i.e. 
we pay San Francisco rates in San Francisco).\n\n### Hot Market Adjustment\n\nThis is an adjustment to any US-based metro area where the geographical area Rent Index is less than the Hot Market Adjustment plus the Numbeo Rent Index, to recognize that \"hot markets\" tend to have a Rent Index that is trailing (i.e. lower than) what one would expect based on compensation rates in the area.\n\n### Level Factor\n\nThis is currently defined as junior (0.8), intermediate (1.0), senior (1.2), staff (1.4), or manager (1.4), and will be defined as II (.8), III (1.0), Senior (1.2), Staff (1.4), or manager (1.4).\n\n### Experience Factor\n\nThis falls between 0.8 - 1.2 based on our [Experience Factor Guidelines](/handbook/total-rewards/compensation/compensation-calculator/#level-factor):\n\n- 0.8: New to the position requirements\n- 0.9: Learning the position requirements\n- 1: Comfortable with the requirements\n- 1.1: Thriving with the requirements\n- 1.2: Expert in the requirements\n\n### Country Factor\n\nThis is a ratio of the calculator to market data. We [determine this ratio](/handbook/total-rewards/compensation/compensation-calculator/#location-factor) by looking at how our calculator aligns to market in the region. If the calculator comes in higher than market, a factor lower than 1 is applied. If the calculator is in line with market, the factor stays at 1.\n\n### Contract Type Factor\n\nThis distinguishes between employee (1) or contractor (1.17). A contractor may carry the costs of their own health insurance, social security taxes, etc, leading to a 17 percent higher compensation for the contractor to account for the extra expenses to these GitLab team-members.\n\nThe calculator can be found on each position description. 
For example, take a look at our [Compensation Calculator for Developers](https://handbook.gitlab.com/job-families/engineering/backend-engineer/?area=San-Francisco_California&country=United-States&experience=0&level=Intermediate&low=96160&high=144240#compensation).\n\n## Using San Francisco Market Data\n\nThe first step in this iteration was to gather market data and incorporate it as the benchmarks for each role. After obtaining a global data set to map to our positions, we needed to decide if New York was still the right city to pivot the benchmarks around. After some analysis, we determined that San Francisco was a better source of data, so we adjusted the formula. We also analyzed and adjusted the parameters around rent index to ensure in San Francisco you make San Francisco's benchmark.\n\n## Instituting a Minimum Rent Index\n\nEarlier in 2017, we instituted a Geographical Areas iteration to the compensation calculator to ensure that there are not large pay differences in regions that have a similar job market. We looked at the rent indexes by [region](/handbook/total-rewards/compensation/compensation-calculator/#location-factor), determined any outliers on the high or low end of the rent index, and set the regional rent index at the highest of the remaining data set. With the January iteration of the compensation calculator, we also set a Minimum Rent Index so no one would be paid less than 41 percent of San Francisco’s market.\n\n## Adjusting our team’s pay\n\nWith this iteration of the compensation calculator, we wanted to align our team’s salaries according to market. We first looked at how experienced the team member is in their role by having the manager conduct an [Experience Factor Review](/handbook/total-rewards/compensation/compensation-calculator/#level-factor). This review verified we are paying our team in line with their experience, and not determining their experience to fit compensation. 
This review generates an output which is applied in the compensation calculator, but is also a great way to start the conversation around growth within each role. Managers and direct reports were able to review the experience factors and have constructive conversations around experience. Once we had all of the calculator inputs, including the up-to-date Experience Factor, our People Ops team reviewed all salaries to match the new compensation calculator. At the same time as the calculator was released, the increases to pay were also communicated.\n\n## What’s next, and why we think the compensation calculator is a powerful tool\n\nWe’ll continue to add more countries to our Country Factors list, review adding an additional factor for specialization within Development roles, review how the levels overlap when it comes to promotions, and review the Rent Indexes for countries with many data points (like the United States and United Kingdom).\n\nWe want to continue to make the calculator as reflective of market in as many locations as we can, given possible data constraints. This will go some way towards eliminating pay inequality among underrepresented groups, promote salary transparency on what each team member and candidate’s market value is, and save valuable recruiting time.\n\nWe also want to hear from you on where this calculator can continue to improve! 
Please let us know what you think in the comments.\n\n[Cover image](https://unsplash.com/photos/_zsL306fDck?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Antoine Dautry on [Unsplash](https://unsplash.com/search/photos/numbers?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[832,9,810],{"slug":3197,"featured":6,"template":680},"gitlabs-global-compensation-calculator-the-next-iteration","content:en-us:blog:gitlabs-global-compensation-calculator-the-next-iteration.yml","Gitlabs Global Compensation Calculator The Next Iteration","en-us/blog/gitlabs-global-compensation-calculator-the-next-iteration.yml","en-us/blog/gitlabs-global-compensation-calculator-the-next-iteration",{"_path":3203,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3204,"content":3209,"config":3214,"_id":3216,"_type":14,"title":3217,"_source":16,"_file":3218,"_stem":3219,"_extension":19},"/en-us/blog/gitops-with-gitlab-connecting-the-cluster",{"title":3205,"description":3206,"ogTitle":3205,"ogDescription":3206,"noIndex":6,"ogImage":2010,"ogUrl":3207,"ogSiteName":667,"ogType":668,"canonicalUrls":3207,"schema":3208},"GitOps with GitLab: Connect with a Kubernetes cluster","In our third article in our GitOps series, learn how to connect a Kubernetes cluster with GitLab for pull and push-based deployments.","https://about.gitlab.com/blog/gitops-with-gitlab-connecting-the-cluster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Connect with a Kubernetes cluster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-11-18\",\n      }",{"title":3205,"description":3206,"authors":3210,"heroImage":2010,"date":3211,"body":3212,"category":743,"tags":3213},[2531],"2021-11-18","\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. 
These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\n## GitOps with GitLab: connecting a Kubernetes cluster\n\nThis [GitOps](/topics/gitops/) with GitLab post shows how to connect a Kubernetes cluster with GitLab for pull and push based deployments and easy security integrations. In order to do so, the following elements are required:\n\n- A Kubernetes cluster that you can access and can create new resources, including `Role` and `RoleBinding` in it. \n- You will need `kubectl` and your local environment configured to access the aforementioned cluster.\n- (Optional, recommended) Terraform and a Terraform project set up as shown [in the previous article](/blog/gitops-with-gitlab-infrastructure-provisioning/) to retrieve an agent registration token from GitLab.\n- (Optional, recommended) `kpt` and `kustomize` to install the Agent into your cluster.\n- (Optional, quickstart) If you prefer a less \"gitopsy\" approach, you will need `docker` (Docker Desktop is not needed). 
This is simpler to follow, but provides less control to you.\n\n## How to connect a cluster to GitLab\n\nThere are many ways how one can connect a cluster to GitLab:\n\n- you can set up a `$KUBECONTEXT` variable manually, manage all the related connections and use GitLab CI/CD to push changes into your cluster\n- you can use a 3rd party tool, like [ArgoCD](https://argo-cd.readthedocs.io/en/stable/) or [Flux](https://fluxcd.io) to get pull based deployments\n- you can use the legacy, certificate-based cluster integration within GitLab in which case GitLab will manage the `$KUBECONTEXT` for you and you can get easy metrics, log and monitoring integrations\n- or you can use the recommended approach, the [GitLab Agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/), to have pull and push based deployment support, network security policy integrations and the possibility of metrics and monitoring too\n\nWe are going to focus on the Agent-based setup here as we believe that it serves and will serve our users best, hopefully you included.\n\n## How does the Agent work\n\nThe Agent has a component that needs to be installed into your cluster. We call this component `agentk`. Once `agentk` is installed it reaches out to GitLab, and authenticates itself with an access token. So, the first step is to get a token from GitLab. We call this step \"the Agent registration.\" If the authentication succeeds, `agentk` sets up a bidirectional GRPC channel between itself and GitLab. The emphasis here is on \"bidirectional.\" This enables requests and messages to be sent by either side and provides the possibility of much deeper integrations than the other approaches while still being a nice citizen within your cluster.\n\nOnce the connection is established, the Agent retrieves its own configuration from GitLab. This configuration is a `config.yaml` file under a repository, and you actually register the location of this configuration file when you register a new Agent. 
The configuration describes the various capabilities enabled for an Agent.\n\nOn the GitLab side, `agentk` communicates with - what we call - the Kubernetes Agent Server, or `kas`. As most users do not have to deal with setting up `kas`, I won't write about it here. You need to be a GitLab administrator [to set up and manage `kas`](https://docs.gitlab.com/ee/administration/clusters/kas.html). If you are on gitlab.com, `kas` is available to you at `kas.gitlab.com`, thanks to our amazing SRE team.\n\nSo the steps we are going to take in this article are the following:\n\n1. Create a configuration file for the Agent\n1. Register the Agent and retrieve its authentication token\n1. Install `agentk` into the cluster together with the token\n\nFinally, we will set up an example pull-based deployment just to test that everything worked as expected. Let's get started!\n\n## How many Agents do you need for a larger setup\n\nWe recommend having a separate Agent registered at least against each of your environments. If you have multiple clusters, have at least one agent registered with each cluster. While it is possible to have many `agentk` deployments with the same authentication token and thus configuration file, this is not supported and might lead to synchronization problems!\n\nThe different agent configurations can use the same Kubernetes manifests for deployments. So maintaining a multi-region cluster where all the clusters should be identical does not require much effort. \n\nWe designed `agentk` to be very lightweight so you should not worry about deploying multiple instances of it into a cluster. \n\nWe know users who use separate `agentk` instances by squad for example. In these situations, the `squad` owns some namespaces in the cluster and each Agent can access only the namespaces available for their squad. 
This way `agentk` is not just a good citizen in your cluster, but is like a team member in your squad.\n\n## Create a configuration file for the Agent\n\nNote:\nYou can use either the Terraform project from the previous step or start with a new project. I will assume that we build on top of the Terraform setup from the previous article, linked above, that will come in handy when we want to register the Agent using Terraform. I won't go through setting up all the environment variables here for local Terraform run.\n\nDecide on your agent name, and create an empty file in your project under `.gitlab/agents/\u003Cyour agent name>/config.yaml`. Nota bene, the extension is `yaml` not `yml` and your agent name must follow the [DNS label standard from RFC 1123](https://docs.gitlab.com/ee/user/clusters/agent/install/#create-an-agent-configuration-file). I'll call my agent `demo-agent`, so the file is under `.gitlab/agents/demo-agent/config.yaml`.\n\n## Register the Agent\n\nThe next step is to register the Agent with GitLab. You can do this either through the GitLab UI or using Terraform. I will show you both approaches.\n\n### Registering through the UI\n\nOnce the configuration file is in place, visit `Infrastructure/Kubernetes` and add a new cluster using the Agent. A dialog will pop up where you can select your agent.\n\nOnce you hit \"next,\" you will see the registration token and a `docker` command for easy installation. The `docker` command includes the token too and you can run it to quickly set up an `agentk` inside of your cluster. (You might need to create a namespace first!) Feel free to run the command for a quickstart or follow the tutorial for a truly code-based approach.\n\n### Registering through code\n\nWe will use Terraform to register the Agent through code. 
Let's create the following files:\n\n- Under `terraform/gitlab-agent/main.tf`\n\n```hcl\nterraform {\n  backend \"http\" {\n  }\n  required_version = \">= 0.13\"\n  required_providers {\n    gitlab = {\n      source = \"gitlabhq/gitlab\"\n      version = \"~>3.6.0\"\n    }\n  }\n}\n\nprovider \"gitlab\" {\n    token = var.gitlab_password\n}\n\nmodule \"gitlab_kubernetes_agent_registration\" {\n  source = \"gitlab.com/gitlab-org/kubernetes-agent-terraform-register-agent/local\"\n  version = \"0.0.2\"\n\n  gitlab_project_id = var.gitlab_project_id\n  gitlab_username = var.gitlab_username\n  gitlab_password = var.gitlab_password\n  gitlab_graphql_api_url = var.gitlab_graphql_api_url\n  agent_name = var.agent_name\n  token_name = var.token_name\n  token_description = var.token_description\n}\n```\n\nAs you can see we will use a module here. The module is hosted using the Terraform registry provided by GitLab. You can check out [the module source code here](https://gitlab.com/gitlab-org/configure/examples/kubernetes-agent-terraform-register-agent). You might have guessed correctly that under the hood the module uses the GitLab GraphQL API to register the agent and retrieve a token. 
We will need to set up variables for it to work.\n\n- Create `terraform/gitlab-agent/variables.tf`\n\n```hcl\nvariable \"gitlab_project_id\" {\n  type = string\n}\n\nvariable \"gitlab_username\" {\n  type = string\n}\n\nvariable \"gitlab_password\" {\n  type = string\n}\n\nvariable \"agent_name\" {\n  type = string\n}\n\nvariable \"token_name\" {\n  type    = string\n  default = \"kas-token\"\n}\n\nvariable \"token_description\" {\n  type    = string\n  default = \"Token for KAS Agent Authentication\"\n}\n\nvariable \"gitlab_graphql_api_url\" {\n  type    = string\n  default = \"https://gitlab.com/api/graphql\"\n}\n```\n\n- Create `terraform/gitlab-agent/outputs.tf`\n\n```hcl\noutput \"agent_id\" {\n  value     = module.gitlab_kubernetes_agent_registration.agent_id\n}\n\noutput \"token_secret\" {\n  value     = module.gitlab_kubernetes_agent_registration.token_secret\n  sensitive = true\n}\n```\n\nOnce the registration is over, you'll be able to retrieve the agent ID and the token using these Terraform outputs.\n\n### Run the Terraform project\n\nOnce the above code is in place, we need to run it to actually register the Agent. 
Here, I am going to extend the setup from the previous article.\n\n#### Running locally\n\n- Create `terraform/gitlab-agent/.envrc`  as you did for the network project.\n\n```\nexport TF_STATE_NAME=${PWD##*terraform/}\nsource_env ../../.main.env\n```\n\nNow run Terraform\n\n```bash\nterraform init\nterraform plan\nterraform apply\n```\n\n#### Running from CI/CD pipeline\n\nExtend the `.gitlab-ci.yml` file with the following 3 jobs:\n\n```hcl\ngitlab-agent:init:\n  extends: .terraform:init\n  stage: init\n  variables:\n    TF_ROOT: terraform/gitlab-agent\n    TF_STATE_NAME: gitlab-agent\n  only:\n    changes:\n      - \"terraform/gitlab-agent/*\"\n\ngitlab-agent:review:\n  extends: .terraform:build\n  stage: build\n  variables:\n    TF_ROOT: terraform/gitlab-agent\n    TF_STATE_NAME: gitlab-agent\n  resource_group: tf:gitlab-agent\n  only:\n    changes:\n      - \"terraform/gitlab-agent/*\"\n\ngitlab-agent:deploy:\n  extends: .terraform:deploy\n  stage: deploy\n  variables:\n    TF_ROOT: terraform/gitlab-agent\n    TF_STATE_NAME: gitlab-agent\n  resource_group: tf:gitlab-agent\n  environment:\n    name: demo-agent\n  when: manual\n  only:\n    changes:\n      - \"terraform/gitlab-agent/*\"\n    variables:\n      - $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nAs you can see these are the same jobs that we saw already, they are just parameterized for the `gitlab-agent` terraform project.\n\nNota bene, even if you use GitLab to register the Agent, you will need your command line to install `agentk` for the first time! As a result, you can not avoid a local setup as you will need to run at least `terraform output` to retrieve the token!\n\n## Install `agentk`\n\nIn this tutorial we are going to follow [the advanced installation instructions](https://docs.gitlab.com/ee/user/clusters/agent/install/index.html#advanced-installation) from the GitLab documentation. 
This approach is highly customizable using `kustomize` and `kpt`.\n\nFirst, let's retrieve the basic Kubernetes resource definitions using `kpt`:\n\n- Create a directory `packages` using `mkdir packages`\n- Run `kpt pkg get https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent.git/build/deployment/gitlab-agent packages/gitlab-agent`\n\nThis will retrieve the most recent version of the `agentk` installation resources. You can request a tagged version with the well-known `@` syntax, for example by running `kpt pkg get https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent.git/build/deployment/gitlab-agent@v14.4.0 packages/gitlab-agent`. You can see [all the available versions here](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/tags).\n\n### Why `kpt` - could we make this a box?\n\nThe choice of `kpt` is because it allows sane upstream package management for you. With `kpt` you will be able to regularly update your packages using something like `kpt pkg update packages/gitlab-agent@\u003Cnew version> --strategy=resource-merge`. It basically allows you to modify your package locally, and will try to merge upstream changes into it. Read the `kpt pkg update -h` output for more information and alternative merge strategies.\n\n### Continue with the installation - if it's a box, this is not needed\n\nThe `kpt` packages you retrieved are actually a set of `kustomize` overlays. The `base` defines only the `agentk` deployment and namespace; the `cluster` defines some default RBAC around the deployment. Feel free to add your own overlays and use those. 
We will extend this package with custom overlays in a part 6 of the series.\n\nTo configure the package, see the available configuration options using:\n\n```bash\nkustomize cfg list-setters packages/gitlab-agent\n        NAME                 VALUE               SET BY                  DESCRIPTION              COUNT   REQUIRED   IS SET  \n  agent-version       stable                 package-default   Image tag for agentk container     1       No         No      \n  kas-address         wss://kas.gitlab.com   package-default   kas address. Use                   1       No         No      \n                                                               grpc://host.docker.internal:8150                              \n                                                               if connecting from within Docker                              \n                                                               e.g. from kind.                                               \n  name-prefix                                                  Prefix for resource names          1       No         No      \n  namespace           gitlab-agent           package-default   Namespace to install GitLab        2       No         No      \n                                                               Kubernetes Agent into                                         \n  prometheus-scrape   true                   package-default   Enable or disable Prometheus       1       No         No      \n                                                               scraping of agentk metrics.                              \n```\n\nThe package default will be different if you used a tagged version for getting the package. 
Let's set the version as using `stable` is not recommended.\n\n```bash\nkustomize cfg set packages/gitlab-agent agent-version v14.4.1\nset 1 field(s) of setter \"agent-version\" to value \"v14.4.1\"\n```\n\nFeel free to adjust the other configuration options too or add your own overlays if that is needed.\n\n### Which agent-version to use - could we make this a box?\n\nIf possible the version of `agentk` should match the major and minor version of your GitLab instance. You can find out the version of your GitLab instance under the Help menu on the UI.\n\nIf there is no agent version with your major and minor version, then pick the agent with the highest major and minor below the version of your GitLab.\n\n### Continue with the installation - if it's a box, this is not needed\n\nWarning:\nBefore the next step, I want to warn you about never, ever committing unencrypted secrets into git, and the agent registration token is a secret!\n\nLet's retrieve the agent registration token from our Terraform project. Run the following command in the `terraform/gitlab-agent` directory:\n\n```bash\nterraform output -raw token_secret > ../../packages/gitlab-agent/base/secrets/agent.token\n```\n\nThis writes the registration token to a file on your local computer. Do not commit these changes to git!\n\nAt this point, we are ready to deploy `agentk` into the cluster, so run:\n\n```bash\nkustomize build packages/gitlab-agent/cluster | kubectl apply -f -\n```\n\nLet's get rid of the secret:\n\n```bash\necho \"Invalid token\" > packages/gitlab-agent/base/secrets/agent.token\n```\n\nYou are good to commit your changes to `git` now!\n\n## Testing the setup\n\nWe have installed the Agent, now what? How can we start using it? In the next article we will see in detail how to deploy a more serious application into the cluster. 
Still, to check that cluster synchronization actually works, let's deploy a `ConfigMap`.\n\n- Create `kubernetes/test_config.yaml` with the following content:\n\n```yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: gitlab-gitops\n  namespace: default\ndata:\n  key: It works!\n```\n\n- Modify your Agent configuration file under `.gitlab/agents/demo-agent/config.yaml`, and add the following to it:\n\n```yaml\ngitops:\n  # Manifest projects are watched by the agent. Whenever a project changes,\n  # GitLab deploys the changes using the agent.\n  manifest_projects:\n  - id: path/to/your/project\n    default_namespace: gitlab-agent\n    # Paths inside of the repository to scan for manifest files.\n    # Directories with names starting with a dot are ignored.\n    paths:\n    - glob: 'kubernetes/test_config.yaml'\n    #- glob: 'kubernetes/**/*.yaml'\n```\n\nChange the `- id: path/to/your/project` line above to point to your project's path!\n\nThe above configuration tells the Agent to keep the `kubernetes/test_config.yaml` file in sync with the cluster. I've left a commented line at the end to show how you could use wildcards. This will come in handy in future steps of this article. The `default_namespace` is used if no namespace is provided in the Kubernetes manifests. There are many other options to configure as well even for the `gitops` use case. You can read more about these in [the configuration file reference documentation](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html).\n\nOnce you commit the above changes, GitLab notifies `agentk` about the changed files. First, `agentk` updates its configuration; second, it retrieves the `ConfigMap`.\n\nWait a few seconds, and run `kubectl describe configmap gitlab-gitops` to check that the changes got applied to your cluster. 
You should see something similar:\n\n```\nName:         gitlab-gitops\nNamespace:    default\nLabels:       \u003Cnone>\nAnnotations:  config.k8s.io/owning-inventory: 502-28431043\n              k8s-agent.gitlab.com/managed-object: managed\n\nData\n====\nkey:\n",[1091,231,9],{"slug":3215,"featured":6,"template":680},"gitops-with-gitlab-connecting-the-cluster","content:en-us:blog:gitops-with-gitlab-connecting-the-cluster.yml","Gitops With Gitlab Connecting The Cluster","en-us/blog/gitops-with-gitlab-connecting-the-cluster.yml","en-us/blog/gitops-with-gitlab-connecting-the-cluster",{"_path":3221,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3222,"content":3227,"config":3232,"_id":3234,"_type":14,"title":3235,"_source":16,"_file":3236,"_stem":3237,"_extension":19},"/en-us/blog/gitops-with-gitlab-infrastructure-provisioning",{"title":3223,"description":3224,"ogTitle":3223,"ogDescription":3224,"noIndex":6,"ogImage":2010,"ogUrl":3225,"ogSiteName":667,"ogType":668,"canonicalUrls":3225,"schema":3226},"GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform","In part two of our GitOps series, we set up the infrastructure using GitLab and Terraform. Here's everything you need to know.","https://about.gitlab.com/blog/gitops-with-gitlab-infrastructure-provisioning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-11-04\",\n      }",{"title":3223,"description":3224,"authors":3228,"heroImage":2010,"date":3229,"body":3230,"category":743,"tags":3231},[2531],"2021-11-04","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. 
These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nThis post focuses on setting up the underlying infrastructure using GitLab and Terraform. \n\nThe first step is to have a network and some computing instances that we can use as our Kubernetes cluster. In this project, I’ll use [Civo](https://www.civo.com) to host the infrastructure as it has the most minimal setup, but the same can be achieved using any of the hyperclouds. GitLab documentation provides examples on how to set up a [cluster on AWS](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_eks_cluster.html) or [GCP](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_gke_cluster.html).\n\nWe want to have a project that describes our [infrastructure as code (IaC)](/topics/gitops/infrastructure-as-code/). As Terraform is today the de facto standard in infrastructure provisioning, we’ll use Terraform for the task. Terraform requires a state storage backend; we will use the GitLab managed Terraform state, which is very easy to get started with. Moreover, we will set up a pipeline to run the infrastructure changes automatically if they are merged to the main branch.\n\n## What infrastructure related steps are we going to codify?\n\n1. Create a VPC\n2. Set up a Kubernetes cluster\n\nActually, we will create separate Terraform projects for these steps under a single GitLab project. We split the infrastructure because in a real world scenario, these projects will likely be a bit bigger, and Terraform slows down quite a lot if it has to deal with big projects. 
In general, it is a good practice to have small Terraform projects, and think about the infrastructure in a layered way, where higher layers can reference the output of lower layers. There are [many ways to access the output of another Terraform project](https://www.terraform.io/docs/language/state/remote-state-data.html#alternative-ways-to-share-data-between-configurations), and we leave it up to the reader to learn more about these. In this case, we will use simple data resources.\n\nAfter this long intro, let’s get started!\n\n## Creating the network\n\nFirst, let’s create a new GitLab project. You can use either an empty project or any of the project templates. If you plan to do all these tutorials, I recommend starting with the [Cluster Management Project template](https://docs.gitlab.com/ee/user/clusters/management_project_template.html). Once the project is ready, let’s create the following files:\n\n- A `terraform/network/main.tf` file:\n\n```hcl\nterraform {\n  required_providers {\n    civo = {\n      source = \"civo/civo\"\n      version = \"0.10.10\"\n    }\n  }\n  backend \"http\" {\n  }\n}\n\n# Configure the Civo Provider\nprovider \"civo\" {\n  token = var.civo_token\n  region = local.region\n}\n\nresource \"civo_network\" \"network\" {\n    label = \"development\"\n}\n```\n\nThis file describes almost everything we want this project to do. The first block configures Terraform to use the `civo/civo` provider and a simple `http` backend for state storage. As I mentioned above, we will use [the GitLab managed Terraform state](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html), that acts like an `http` backend from Terraform’s point of view. The GitLab backend is versioned and encrypted by default, and GitLab CI/CD contains all the environment variables needed to access it. I will demonstrate later how you can access the backend either from the local command line or from GitLab CI/CD.\n\nNext we configure the `Civo` provider. 
You can see that here we use two variables, an input and a local variable. These will be defined in separate files below. Finally, we describe a network and give it the “development” label.\n\n- A `terraform/network/outputs.tf` file:\n\n```hcl\noutput \"network\" {\n  value = civo_network.network.id\n}\n```\n\nThis file just provides the network id as an output variable from Terraform. Other projects could consume it. We won’t use this, but I consider it a good practice as it might help to debug issues.\n\n- A `terraform/network/locals.tf` file:\n\n```hcl\nlocals {\n  region = \"LON1\"\n}\n```\n\nHere we define the `region` local as mentioned under the description of the `main.tf` file. Why aren’t we making it an input variable? Because this is closely related to our infrastructure and for this reason we want to keep it in code. It should be version controlled and changes should be reviewed following the team’s processes. We could write the values into a `.tfvars` file also to achieve versioning and have it as a variable. I prefer to keep it in `hcl` to have it closer to the rest of the code.\n\n- A `terraform/network/variables.tf` file:\n\n```hcl\nvariable \"civo_token\" {\n  type = string\n  sensitive = true\n}\n```\n\nFinally, we define the Civo access token as an input variable.\n\nNow, we are ready with the Terraform code, but we cannot access the GitLab state backend yet. For that we either need to configure our local environment or GitLab CI/CD. Let’s see both setups.\n\n## Running Terraform locally\n\nYou can run Terraform either locally or using GitLab CI/CD. The following two sections present both approaches.\n\n### Accessing the GitLab Terraform state backend locally\n\nThe simplest way to configure the “http” backend is using environment variables. There are many environment variables needed though! For this reason, I prefer to use a collection of [direnv](https://direnv.net/) files. 
We will need all these environment variables configured:\n\n```\nTF_HTTP_PASSWORD\nTF_HTTP_USERNAME\nTF_HTTP_ADDRESS\nTF_HTTP_LOCK_ADDRESS\nTF_HTTP_LOCK_METHOD\nTF_HTTP_UNLOCK_ADDRESS\nTF_HTTP_UNLOCK_METHOD\nTF_HTTP_RETRY_WAIT_MIN\n```\n\nDirenv enables us to add a few files to our repository to describe the above environment variables in a nice and scalable way. Clearly, there are some variables that are sensitive, like `TF_HTTP_PASSWORD`, so this should not be stored in git. Moreover, we could reuse most of these variables in the other two Terraform projects we are going to create. With these considerations in mind, let’s create the following 3 files:\n\n- Create `terraform/network/.envrc`: \n\n```\nexport TF_STATE_NAME=civo-${PWD##*terraform/}\nsource_env ../../.main.env\n```\n\nThis sets the `TF_STATE_NAME` variable to `civo-network` using some bash magic and loads the `.main.env` file from the root of the repository using the `source_env` method provided by `direnv`. This can be added to version control safely.\n\n- Create `.main.env`:\n\n```\nsource_env_if_exists ./.local.env\n\nCI_PROJECT_ID=28431043\nexport TF_HTTP_PASSWORD=\"${CI_JOB_TOKEN:-$GITLAB_ACCESS_TOKEN}\"\nexport TF_HTTP_USERNAME=\"${GITLAB_USER_LOGIN}\"\nexport GITLAB_URL=https://gitlab.com\n\nexport TF_VAR_remote_address_base=\"${GITLAB_URL}/api/v4/projects/${CI_PROJECT_ID}/terraform/state\"\nexport TF_HTTP_ADDRESS=\"${TF_VAR_remote_address_base}/${TF_STATE_NAME}\"\nexport TF_HTTP_LOCK_ADDRESS=\"${TF_HTTP_ADDRESS}/lock\"\nexport TF_HTTP_LOCK_METHOD=\"POST\"\nexport TF_HTTP_UNLOCK_ADDRESS=\"${TF_HTTP_LOCK_ADDRESS}\"\nexport TF_HTTP_UNLOCK_METHOD=\"DELETE\"\nexport TF_HTTP_RETRY_WAIT_MIN=5\n\n# export TF_LOG=\"TRACE\"\n```\n\nThis file contains the bulk of the environment variables we need, and can be added to version control safely as no secrets are stored there. The first line loads the `.local.env` file that will contain the sensitive values, again using a `direnv` method. 
The second line contains the GitLab project ID. This is shown under the project name of your GitLab project. The next three lines configure access to GitLab. The username and password will be populated from the `local.env` file, while the `GITLAB_URL` variable is there to help you if you are on a self-managed GitLab instance.\n\n- Create `.local.env` and add it to `.gitignore`:\n\n```\nGITLAB_ACCESS_TOKEN=\u003Cyour GitLab personal access token>\nGITLAB_USER_LOGIN=\u003Cyour GitLAb username>\nexport TF_VAR_civo_token=\u003Cyour Civo access token>\n```\n\nClearly, I cannot provide the values for this file. Please fill them out with your credentials. You can generate a GitLab personal access token under your settings. To access the GitLab managed Terraform state using a personal access token, the token should have the `api` scope enabled.\n\nWarning: **Don’t forget to add this file to `.gitignore`**. Actually, I have it in my global gitignore file to avoid accidental commits.\n\nAs the environment variables are set up, you should make direnv to start using these variables. When you `cd` into the `terraform/network` directory a warning should appear asking you to run `direnv allow`. Enable the environment variables:\n\n```\ncd terraform/network\ndirenv allow\n```\n\n### Creating the network - finally\n\nLet’s see if we managed to set up everything right!\n\n```\nterraform init\nterraform plan\n```\n\nThe first command just initializes Terraform, downloads the Civo plugin and does some sanity checks. The second command on the other hand connects to the remote state backend, and computes the necessary changes to provide the infrastructure we described in this project.\n\nIf we like the changes, we can apply them with\n\n```\nterraform apply\n```\n\n_Nota bene_, in a real world setup, you would likely output a plan file from `terraform plan` and feed it into `terraform apply`, just like the CI/CD setup will do it later. 
Anyway, this is good enough for us, so let’s create the cluster next.\n\n### Running Terraform using GitLab CI/CD\n\nNote: This section assumes that you have access to GitLab Runners to run the CI/CD jobs.\n\nGiven the flexibility of GitLab CI/CD it can be set up in many different ways. Here we will build a pipeline that incorporates the most important aspects of a Terraform-oriented pipeline, without restricting you to require merge requests or any other processes. The only restriction we'll place on it is that changes should only be applied on the main branch and this should be a manual action.\n\nCopy the following code into `.gitlab-ci.yml` in the root of your project:\n\n```yaml\ninclude:\n  - template: \"Terraform/Base.latest.gitlab-ci.yml\"\n\nstages:\n- init\n- build\n- deploy\n\nnetwork:init:\n  extends: .terraform:init\n  stage: init\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  only:\n    changes:\n      - \"terraform/network/*\"\n\nnetwork:review:\n  extends: .terraform:build\n  stage: build\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  resource_group: tf:network\n  only:\n    changes:\n      - \"terraform/network/*\"\n\nnetwork:deploy:\n  extends: .terraform:deploy\n  stage: deploy\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  resource_group: tf:network\n  environment:\n    name: dns\n  when: manual\n  only:\n    changes:\n      - \"terraform/network/*\"\n    variables:\n      - $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nThis CI pipeline re-uses [the latest base Terraform CI template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Terraform) shipped with GitLab, and runs the jobs by simply parameterizing them as function calls. 
Let's quickly review the keys used:\n\n- the [`stages`](https://docs.gitlab.com/ee/ci/yaml/#stages) keyword provides a list of stages to compose the pipeline\n- the [`extends`](https://docs.gitlab.com/ee/ci/yaml/#extends) keyword refers to the job defined in [the base Terraform template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform/Base.latest.gitlab-ci.yml)\n- the [`variables`](https://docs.gitlab.com/ee/ci/yaml/#variables) keyword parameterizes the job for our requirements\n- the [`resource_group`](https://docs.gitlab.com/ee/ci/yaml/#resource_group) keyword assures that always only one potentially conflicting job is run\n- the [`only`](https://docs.gitlab.com/ee/ci/yaml/#only--except) keyword restricts runs to specific situations\n\nIf you commit this file and push it to GitLab, a new pipeline will be created that as a last step provides you a manual job to create your network. We will extend this file later throughout this tutorial series.\n\n## Create a Kubernetes cluster\n\nThe code required for the cluster will be very similar to the code for the network.\n\n- Add a `terraform/cluster/main.tf` file:\n\n```hcl\nterraform {\n  required_providers {\n    civo = {\n      source = \"civo/civo\"\n      version = \"0.10.4\"\n    }\n  }\n  backend \"http\" {\n  }\n}\n\n# Configure the Civo Provider\nprovider \"civo\" {\n  token = var.civo_token\n  region = local.region\n}\n\nresource \"civo_kubernetes_cluster\" \"dev-cluster\" {\n    name = \"dev-cluster\"\n    // tags = \"gitlab demo\"  // Do not add tags! There is a bug in the civo-provider :(\n    network_id = data.civo_network.network.id\n    applications = \"\"\n    num_target_nodes = 3\n    target_nodes_size = element(data.civo_instances_size.small.sizes, 0).name\n}\n```\n\nThe only difference compared to `terraform/network/main.tf` is the last resource as that describes the cluster. You can see how we reference the network created before. 
Of course, we'll need a `data` resource for this and the instance sizes.\n\n- Add `terraform/cluster/data.tf` file:\n\n```hcl\ndata \"civo_instances_size\" \"small\" {\n    filter {\n        key = \"name\"\n        values = [\"g3.small\"]\n        match_by = \"re\"\n    }\n\n    filter {\n        key = \"type\"\n        values = [\"instance\"]\n    }\n\n}\n\ndata \"civo_network\" \"network\" {\n    label = \"development\"\n}\n```\n\n\n- The `terraform/cluster/outputs.tf` file outputs some useful details. We won't use them now, but they often come in handy in the longer term.\n\n```hcl\noutput \"cluster\" {\n  value = {\n    status = civo_kubernetes_cluster.dev-cluster.status\n    master_ip = civo_kubernetes_cluster.dev-cluster.master_ip\n    dns_entry = civo_kubernetes_cluster.dev-cluster.dns_entry\n  }\n}\n```\n\n- The `terraform/cluster/locals.tf` file is the same as for the network project:\n\n```hcl\nlocals {\n  region = \"LON1\"\n}\n```\n\n- The `terraform/cluster/variables.tf` file is the same as for the network project:\n\n```hcl\nvariable \"civo_token\" {\n  type = string\n  sensitive = true\n}\n```\n\n### Provision the cluster\n\nLet's see how we can extend the previous local and CI/CD setups to run this Terraform project!\n\n#### Running locally\n\n- Create `terraform/cluster/.envrc`  as you did for the network project:\n\n```\nexport TF_STATE_NAME=civo-${PWD##*terraform/}\nsource_env ../../.main.env\n```\n\nNow run Terraform:\n\n```bash\nterraform init\nterraform plan\nterraform apply\n```\n\n#### Running from CI/CD\n\nExtend the `.gitlab-ci.yml` file with the following 3 jobs:\n\n```yaml\ncluster:init:\n  extends: .terraform:init\n  stage: init\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  only:\n    changes:\n      - \"terraform/cluster/*\"\n\ncluster:review:\n  extends: .terraform:build\n  stage: build\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  resource_group: tf:cluster\n  only:\n    
changes:\n      - \"terraform/cluster/*\"\n\ncluster:deploy:\n  extends: .terraform:deploy\n  stage: deploy\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  resource_group: tf:cluster\n  environment:\n    name: dev-cluster\n  when: manual\n  only:\n    changes:\n      - \"terraform/cluster/*\"\n    variables:\n      - $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nAs you can see these are the same jobs that we saw already, they are just parameterized for the `cluster` Terraform project.\n\nOnce you push your code to GitLab, you cluster should be ready in a few minutes!\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n\n",[530,1091,9],{"slug":3233,"featured":6,"template":680},"gitops-with-gitlab-infrastructure-provisioning","content:en-us:blog:gitops-with-gitlab-infrastructure-provisioning.yml","Gitops With Gitlab Infrastructure Provisioning","en-us/blog/gitops-with-gitlab-infrastructure-provisioning.yml","en-us/blog/gitops-with-gitlab-infrastructure-provisioning",{"_path":3239,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3240,"content":3245,"config":3250,"_id":3252,"_type":14,"title":3253,"_source":16,"_file":3254,"_stem":3255,"_extension":19},"/en-us/blog/gitops-with-gitlab-secrets-management",{"title":3241,"description":3242,"ogTitle":3241,"ogDescription":3242,"noIndex":6,"ogImage":2010,"ogUrl":3243,"ogSiteName":667,"ogType":668,"canonicalUrls":3243,"schema":3244},"GitOps with GitLab: How to tackle secrets management","In part four of our GitOps series, we learn how to manage secrets with the GitLab Agent for Kubernetes.","https://about.gitlab.com/blog/gitops-with-gitlab-secrets-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: How to tackle secrets management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": 
\"2021-12-02\",\n      }",{"title":3241,"description":3242,"authors":3246,"heroImage":2010,"date":3247,"body":3248,"category":743,"tags":3249},[2531],"2021-12-02","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can also view our entire [\"Ultimate guide to GitOps with GitLab\"](/blog/the-ultimate-guide-to-gitops-with-gitlab/) tutorial series._\n\nIn this article we will use our cluster connection to manage secrets within our cluster.\n\n## Prerequisites\n\nThis article assumes that you have a Kubernetes cluster connected to GitLab using the GitLab Agent for Kubernetes. If you don't have such a cluster, I recommend looking at the linked articles above so you have a similar setup from where we will start today.\n\n## A few words about secrets management\n\nThe Kubernetes `Secret` resource is a rather tricky one! By design, secrets should have limited access and should be encrypted at rest and in transit. Still, by default, Kubernetes does not encrypt secrets at rest and accessing them might not be restricted in your cluster. We will not go into detail about how to secure your cluster with respect to secrets in this article. Instead, we want to focus on getting some secrets configured in your cluster with a GitOps approach.\n\nManaging secrets with GitOps means you store those secrets within your Git repository. Of course, you should never store unencrypted secrets in a repo, and some security people are even reluctant to store encrypted secrets in Git. We will not be that worried, but you should consider if this is an acceptable risk for you. 
There is an alternative we'll talk about, below, if you prefer to not manage your secrets in Git.\n\nThere are a few benefits of Git-based secrets management:\n\n- you get versioning by default\n- collaboration is supported using merge requests\n- as secrets are in code, you push responsibilities towards the development team\n- the tools used are well-known to developers\n\n## Secrets management with GitLab\n\nWhen it comes to secrets, Kubernetes, and GitLab, there are at least 3 options to choose from:\n\n- create secrets automatically from environment variables in GitLab CI\n- manage secrets through HashiCorp Vault and GitLab CI\n- manage secrets in git with a GitOps approach\n\n### Create secrets automatically from environment variables in GitLab CI\n\nThe Auto Deploy template applies every [`K8S_SECRET_` prefixed environment variable](https://docs.gitlab.com/ee/topics/autodevops/customize.html#application-secret-variables) into your cluster as a Kubernetes Secret. Later, your applications can reference these secrets. This approach is the simplest to use, especially if you would like to use [Auto DevOps](/topics/devops/). We will look into it in a future article.\n\nWhile simple to use, with this approach your secrets are stored in the GitLab database, instead of `Git`. That means you lose versioning of the secrets, you need `Maintainer` rights to modify these secrets, and you lose the ability to approve a change of secret in a merge request.\n\n### Manage secrets through HashiCorp Vault and GitLab CI\n\n[GitLab CI/CD integrates with HashiCorp Vault](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/#authenticating-and-reading-secrets-with-hashicorp-vault) to support advanced secrets management use cases. You can combine the `K8S_SECRET_` prefixed use case even with Vault-based secrets, and have the secrets applied automatically. 
 \n\nWith this approach, you get all the benefits of HashiCorp Vault, but there is a question: why do you move secrets from Vault to GitLab just to move them to your cluster instead of retrieving the secrets directly from within your cluster? We recommend leaving GitLab out of this flow if you don't have a really good reason to provide secret access to GitLab too! Vault has really great Kubernetes support, thus retrieving secrets directly should be feasible.\n\n### Manage secrets in Git with a GitOps approach\n\nTo manage secrets in Git, we will need some kind of tooling to take care of the encryption/decryption of the secrets. In this article, I will show you how to set up and use [Bitnami's Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets), but you can try other tools, like [SOPS](https://github.com/mozilla/sops) too. We will look into Bitnami's approach as it targets Kubernetes exclusively, unlike SOPS that supports other use cases too, and might need a bit more setup for Kubernetes.\n\nBitnami's Sealed Secrets is composed of an in-cluster controller and a CLI tool. The cluster component defines a `SealedSecret` custom resource that stores the encrypted secret and related metadata. Once a `SealedSecret` is deployed into the cluster, the controller decrypts it and creates a native Kubernetes `Secret` resource from it. To create a `SealedSecret` resource, the `kubeseal` utility can be used. `kubeseal` can take a public key and transform and encrypt a native Kubernetes `Secret` into a `SealedSecret`, and `kubeseal` can help with retrieving the public key from the cluster-side controller too.\n\n## Setting up Bitnami's Sealed Secrets\n\nAs the GitLab Agent supports pure Kubernetes manifests to do GitOps, we will need the manifests for Sealed Secrets. Open the [Sealed Secrets releases page](https://github.com/bitnami-labs/sealed-secrets/releases/) and find the most recent release (Don't be fooled by the `helm` releases!). 
At the time of writing this article, the most recent [release is v0.16.0](https://github.com/bitnami-labs/sealed-secrets/releases/tag/v0.16.0). From there you can download the release `yaml`. If your cluster supports RBAC, I recommend the basic `controller.yaml` file.\n\n- Save and commit the `controller.yaml` under `kubernetes/sealed-secrets.yaml`\n\nPush the changes and wait a few seconds for them to get applied. Check that they got applied successfully using: `kubectl get pods -n kube-system -l name=sealed-secrets-controller`\n\n## Retrieving the public key\n\nWhile the user can encrypt a secret directly with `kubeseal`, this approach requires them to have access to the Kube API. Instead of providing access, we can fetch the public key from the Sealed Secrets controller and store it in the Git repo. The public key can be used to encrypt secrets, but is useless for decrypting them.\n\n```bash\nkubeseal --fetch-cert > sealed-secrets.pub.pem\n```\n\n### How to avoid storing unencrypted secrets\n\nI prefer to have an `ignored` directory within my Git repo. The content of this directory is never committed to Git, and I put all sensitive data under this directory.\n\n```bash\nmkdir ignored\ncat \u003C\u003CEOF > ignored/.gitignore\n*\n!.gitignore\nEOF\n```\n\n## Continue with setup - not needed if we use a box\n\nNow, you can create sealed secrets with the following two commands:\n\n```bash\necho \"Very secret\" | kubectl create secret generic my-secret -n gitlab-agent --dry-run=client --type=Opaque --from-file=token=/dev/stdin -o yaml > ignored/my-secret.yaml\nkubeseal --format=yaml --cert=sealed-secrets.pub.pem \u003C ignored/my-secret.yaml > kubernetes/my-secret.yaml\n```\n\nThe first command creates a regular Kubernetes `Secret` resource in the `gitlab-agent` namespace. Setting the namespace is important if you use Sealed Secrets and every SealedSecret is scoped for a specific namespace. 
You can read more about this in the Sealed Secrets documentation.\n\nThe second command takes a `Secret` resource object and turns it into an encrypted `SealedSecret` resource. In my case, the secret file:\n\n```yaml\napiVersion: v1\ndata:\n  token: VmVyeSBzZWNyZXQK\nkind: Secret\nmetadata:\n  creationTimestamp: null\n  name: my-secret\n  namespace: gitlab-agent\ntype: Opaque\n```\n\ngot turned into:\n\n```yaml\napiVersion: bitnami.com/v1alpha1\nkind: SealedSecret\nmetadata:\n  creationTimestamp: null\n  name: my-secret\n  namespace: gitlab-agent\nspec:\n  encryptedData:\n    token: AgC1m/D1UwliKD3C2QSv/g+zBi1qGz1YTLZfqnl5JJ4NydCatKzsp8LZr2stIlkwcS3f2YAo/ZIq1OUhOgSgkuNMwVdqsBx1zq7Z3xpGLMIMe7B3XhQ+ExWwqgrm1dTiTDHaH9eXsZWaNsruKQU0F8oGxgLfO/axEZeGWd4WngZRaed9B43dy2k05B6fZnxmwtUVSpr86MO52fX06/QdbvB8MZTrYb7qFuL14U0IDvdFl4l8sPl2rrXsriKg0fJHIV6XtlCwPpQGozTZTUX8nbvU0yXothBzPbaIUfXseFqaW8i/i0Ai+aKhWQAjPGooVAXGwKsuve16DxZ6GJPp1ymR1cEsBkEPlYKbVCKtH5VuptCYZuTXMM6OEPzjFabaIMIUVkkciHlUMcpKFfPnpf7XbBNqZCAKjt//9L99gc48dJRyO4pCrcpFnv6287d65UGnWjmcUJNQNBhEuh9k4esfEZuBNiYIz3Ouz7Wg5HQoT6v3i3J1X5LluWEcTK1G10T7UN+QrnklH4yUtx35yLp83B5/TGICo0Yq1QnARNbKhL5EXuwAO427XO65zzJ3Lh2ymUfrBY3bHO8NW4ykO7ZNDRdj/fsge1J8k4yaxeimQapDKs4XMhoNnKqUNPQYaiQzNPRoj9JwMvtvOH+WLJqEXHIc8RooWGkdo/SB7zp3q7OuHk6HRJM+AQVP3t0r3A1bVhHonUGlv1ApduM=\n  template:\n    metadata:\n      creationTimestamp: null\n      name: my-secret\n      namespace: gitlab-agent\n    type: Opaque\n```\n\nJust commit the `SealedSecret` and quickly start to watch for the event stream using `kubectl get events --all-namespaces --watch` to see when the sealed secret is unsealed and applied as a regular `Secret`.\n\n## Utility scripts\n\nIf you found the `kubeseal` command above to be quite complex, you can wrap it in a script.\n\n- Create `bin/seal-secret.sh` with the following content:\n\n```bash\n#!/bin/sh\n\nif [ $# -ne 2 ]\n  then\n    echo \"Usage: $0 ignored/my-secret.yaml output-dir/\"\n    echo \"This script requires two arguments\"\n  
  echo \"The first argument should be the unsealed secret\"\n    echo \"The second argument should be the directory to output the sealed secret\"\n  exit 1\nfi\n\n\nSECRET_FILE=$(basename $1)\n\nkubeseal --format=yaml --cert=sealed-secrets.pub.pem \u003C $1 > \"$2/SealedSecret.${SECRET_FILE}\"\n\necho \"Created file $2/SealedSecret.${SECRET_FILE}\"\n```\n\nThis script takes a path to a vanilla Kubernetes secret and an output directory, and tranforms your `Secret` into a `SealedSecret`.\n\n## Winding it up\n\nIn this article, we have seen how you can install Bitnami's Sealed Secret into your cluster and set it up for static secrets management. Please note the installation method provided here works for all the other 3rd party, off-the-shelf applications that can be deployed using Kubernetes manifests only.\n\n## What is next?\n\nIn the next article, we will see how you can access a Kubernetes cluster using GitLab CI/CD and why you might want to do it even if you aim for GitOps.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n",[530,1091,9],{"slug":3251,"featured":6,"template":680},"gitops-with-gitlab-secrets-management","content:en-us:blog:gitops-with-gitlab-secrets-management.yml","Gitops With Gitlab Secrets Management","en-us/blog/gitops-with-gitlab-secrets-management.yml","en-us/blog/gitops-with-gitlab-secrets-management",{"_path":3257,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3258,"content":3263,"config":3270,"_id":3272,"_type":14,"title":3273,"_source":16,"_file":3274,"_stem":3275,"_extension":19},"/en-us/blog/gitpod-desktop-app-personal-activities",{"title":3259,"description":3260,"ogTitle":3259,"ogDescription":3260,"noIndex":6,"ogImage":2010,"ogUrl":3261,"ogSiteName":667,"ogType":668,"canonicalUrls":3261,"schema":3262},"Why we built GitDock, our desktop app to navigate your GitLab activities","Life is full of moving parts. We get it. 
And that's why we created GitDock so you can keep track of all things GitLab right from your desktop.","https://about.gitlab.com/blog/gitpod-desktop-app-personal-activities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we built GitDock, our desktop app to navigate your GitLab activities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcel van Remmerden\"},{\"@type\":\"Person\",\"name\":\"Jeremy Elder\"}],\n        \"datePublished\": \"2021-10-05\",\n      }",{"title":3259,"description":3260,"authors":3264,"heroImage":2010,"date":3267,"body":3268,"category":743,"tags":3269},[3265,3266],"Marcel van Remmerden","Jeremy Elder","2021-10-05","\n\nKeeping track of everything that is happening in your GitLab projects and groups can be quite overwhelming. Often times you care about not only one project, but multiple ones. Even worse, these projects might even belong to different groups, making everything more complex.\n\nAs an example, product designers at GitLab might work on all of these different projects over the course of just one week:\n\n- [gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab) (our product)\n- [gitlab-com/www-gitlab-com](https://gitlab.com/gitlab-com/www-gitlab-com) (our handbook)\n- [gitlab-org/gitlab-design](https://gitlab.com/gitlab-org/gitlab-design/) (space for discussions)\n- [gitlab-org/gitlab-services/design.gitlab.com](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com) (our design system)\n- [gitlab-org/ux-research](https://gitlab.com/gitlab-org/ux-research) (research studies)\n\n## User-centric vs. 
project-centric navigation\n\nOne of our product design managers ([@jackib](https://gitlab.com/jackib)) created a visualization that shows the current project-centric navigation model that we have in place.\n\n![Project-centric navigation](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/project-centric-navigation.png)\n\nThis model puts the burden of keeping track of your activities and the work you care about on the user. We would rather look for opportunities where we can enable a more user-centric navigation.\n\n![User-centric navigation](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/user-centric-navigation.png)\n\n## Why do we care about this?\n\nUsers already have different ways to stay up to date, for example email notifications, our \"to-dos,\" or custom systems they have set up for themselves. However, when we ran a UX research study, we noticed these tools often times only show a small subset of the things that users are curious about or the tools have to be checked multiple times during the day.\n\nA short summary of the main points we learned from this study:\n\n- Maintainers care about what happened to their project since they last looked at it.\n- Users repeatedly check their pipelines to see the results.\n- Often times users need to jump back into issues/MRs they have recently contributed to.\n\n## What is GitDock?\n\nGitDock is a desktop app you can install on your macOS/Windows/Linux machine (download [latest release](https://gitlab.com/mvanremmerden/gitdock/-/releases)). 
When installed, you will have an icon on your menu bar that brings up a small window.\n\n![GitDock](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/gitdock-window.png)\n\nFrom there you will have direct access to the following information:\n\n- The last pipelines you triggered\n- Your recently viewed GitLab objects (MRs, Issues, Epics, etc...)\n- Favorite projects\n- Your most recent comments\n- Bookmarked items\n\nGitDock also sends you a system notification whenever a pipeline completes, or when a new to-do was created for you.\n\nAll of these features try to put the user at the center. You can see me walk through all functionality in this overview video:\n\n[![YouTube video](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/gitdock-youtube.png)](https://www.youtube.com/watch?v=WkVS38wo4_w)\n\nYou can also see the entire code in our [GitDock](https://gitlab.com/mvanremmerden/gitdock) project and download the [newest release for your machine](https://gitlab.com/mvanremmerden/gitdock/-/releases). \n\n## Why didn't we make this part of our Web UI?\n\nThe main goal for GitDock is to help us learn how users want to navigate in this more user-centric approach. We decided to build this [minimum viable change (MVC)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) in a separate product as it allowed us to move faster and use a few shortcuts, e.g. relying on the local browser history for the recently viewed items instead of storing these in our database. It also permitted us to cut some corners on performance as our API is not yet optimized for this approach. Here's one way example of how it's not optimized: getting the last pipeline you triggered requires three API calls to different endpoints.\n\nOne other advantage is that it gives us a space to test new ideas that we are curious about without having to fully commit to them (e.g. 
bookmarks).\n\n## What are the next steps?\n\nWe want to use the learnings and data from this project to help us [build a better start page for GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/225331). Right now this page is configurable and can show you different content, but almost 99% of users keep the default \"Your projects\" list as start page. We don't think users do this because it is truly the most useful option, and we want to create a better experience for this.\n\nThat's why we are still looking for feedback. Let us know what you think about GitDock and what other content would be helpful for you in a start page, or other navigation feature.\n",[9,700,1440],{"slug":3271,"featured":6,"template":680},"gitpod-desktop-app-personal-activities","content:en-us:blog:gitpod-desktop-app-personal-activities.yml","Gitpod Desktop App Personal Activities","en-us/blog/gitpod-desktop-app-personal-activities.yml","en-us/blog/gitpod-desktop-app-personal-activities",{"_path":3277,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3278,"content":3283,"config":3289,"_id":3291,"_type":14,"title":3292,"_source":16,"_file":3293,"_stem":3294,"_extension":19},"/en-us/blog/gitter-moves-to-element",{"title":3279,"description":3280,"ogTitle":3279,"ogDescription":3280,"noIndex":6,"ogImage":2893,"ogUrl":3281,"ogSiteName":667,"ogType":668,"canonicalUrls":3281,"schema":3282},"Gitter lands new home in Matrix with Element","Gitter’s new owner Element will continue to support and invest in the service and user communities.","https://about.gitlab.com/blog/gitter-moves-to-element","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Gitter lands new home in Matrix with Element\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eliran Mesika\"}],\n        \"datePublished\": \"2020-09-30\",\n      
}",{"title":3279,"description":3280,"authors":3284,"heroImage":2893,"date":3286,"body":3287,"category":675,"tags":3288},[3285],"Eliran Mesika","2020-09-30","\n\nGitter, the open source [chat and networking platform](/blog/gitter-acquisition/), has been sold to secure chat application company [Element](https://element.io) which will continue to support and invest in the service and the user communities going forward.\n\nAs many of you are aware, GitLab has been focused this year on driving efficiency with a goal of extending our depth in our core product categories. That focus led us to look for a buyer for Gitter that could increase investment required to serve developers. With Element’s acquisition of Gitter, GitLab has more bandwidth to devote to our core business and Gitter will continue to have opportunities to thrive. \n\n“A great project chat is an essential element of most open source projects and Gitter is the leading open source solution,” says [Sid Sijbrandij](/company/team/#sytses), CEO, GitLab. “Under GitLab, Gitter’s community has grown to 1.7M users who have also contributed to improving the product for everyone. We are happy that Gitter will now have a fantastic home with Element. They have the momentum to quickly build on the success of Gitter to expand its footprint within the developer community and act as a driver for the rest of its business and the Matrix ecosystem. It’s a great home for Gitter.”\n\nElement is the company behind [Matrix](https://matrix.org), the open network for secure and decentralized communication with more than 20 million users. \n\n“It’s a privilege to have Gitter join Element,” says Matthew Hodgson, CEO and CTO at Element. 
“Gitter is the only chat platform focusing exclusively on developers, and we cannot wait to extend its reach to the entirety of the open Matrix network - confirming Matrix as an ideal home for open collaboration between software developers.”\n\nFounded in 2014, Gitter has operated as a standalone product, independent of GitLab, since 2017. Gitter is unique in its developer focus –  the tagline is “Where developers come to talk.” Used by large communities including [Node](https://gitter.im/nodejs/home), [TypeScript](https://gitter.im/Microsoft/TypeScript), [Angular](https://gitter.im/angular/home) and [Scala](https://gitter.im/scala/home), Gitter’s service is free, open source and has no commercial edition. A complete, indexed message history and robust integration [API](https://developer.gitter.im/docs/welcome) makes Gitter a perfect platform for managing communities around open-source projects. Gitter is available on the web with clients available for Mac, Windows, Linux, iOS, and Android.\n\n## Going forward\n\n[Element plans](https://element.io/blog/gitter-is-joining-element) to build out native Matrix connectivity, replacing the [matrix-appservice-gitter](https://github.com/matrix-org/matrix-appservice-gitter) bridge running since 2016. 
Over time, Gitter will effectively become a Matrix client.\n\nGet more details on this transition on the [Changelog podcast](https://cdn.changelog.com/uploads/podcast/414/the-changelog-414.mp3).\n\nMatrix has a [detailed Gitter transition plan](https://matrix.org/blog/welcoming-gitter-to-matrix) and welcomes [feedback on the process](https://matrix.to/#/#gitter:matrix.org).\n",[267,745,9],{"slug":3290,"featured":6,"template":680},"gitter-moves-to-element","content:en-us:blog:gitter-moves-to-element.yml","Gitter Moves To Element","en-us/blog/gitter-moves-to-element.yml","en-us/blog/gitter-moves-to-element",{"_path":3296,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3297,"content":3303,"config":3309,"_id":3311,"_type":14,"title":3312,"_source":16,"_file":3313,"_stem":3314,"_extension":19},"/en-us/blog/going-remote-education-virtual-learning-tips",{"title":3298,"description":3299,"ogTitle":3298,"ogDescription":3299,"noIndex":6,"ogImage":3300,"ogUrl":3301,"ogSiteName":667,"ogType":668,"canonicalUrls":3301,"schema":3302},"Going remote in education? Don't panic.","If you're an educator moving online, we have some tips for virtual learning success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681183/Blog/Hero%20Images/work_remote_coffee_green.jpg","https://about.gitlab.com/blog/going-remote-education-virtual-learning-tips","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Going remote in education? Don't panic.\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2020-03-27\",\n      }",{"title":3298,"description":3299,"authors":3304,"heroImage":3300,"date":3306,"body":3307,"category":808,"tags":3308},[3305],"Christina Hupy, Ph.D.","2020-03-27","\n\nCampuses around the world in both K-12 and higher education are moving to virtual models of instruction and operation to help reduce the spread of COVID-19. 
As a result, many faculty, students, staff and leadership are now in the position of navigating how to work, teach, and learn remotely with little to no preparation time and even fewer resources.\n\nJumping into virtual education – voluntarily or otherwise – is not easy. Properly developing an online curriculum takes months and months of work, a coordinated tech stack, and a well-defined communication plan. Intentionally, online courses have IT staff to assist in the process of converting classes and generally only convert one at a time.\n\nAre you an educator facing a suddenly digital classroom? Are you worried about answering endless emails from panicked students? Dreading spending hours upon hours recording lectures? Wondering how you will be able to effectively communicate with all your students?\n\nAs the world’s largest all-remote company with 1,200+ employees in 65 countries, GitLab has a wealth of resources to help navigate this challenge! The GitLab [remote work emergency plan]( /company/culture/all-remote/remote-work-emergency-plan/) can be adapted to help both students and educators get up and running quickly and function effectively in this new reality.\n\nWe're excited to share a few immediately actionable tips for faculty, staff, and students who’ve been suddenly thrust out of the classroom and into a virtual education model.\n\n\n\n## Tip 1: Adopt a single source of truth\n\nWhile this term is pretty self-explanatory, it can’t be emphasized enough. When an entire company of people have to be on the same page, it is essential that everyone knows exactly what needs to happen, how it happens, and when it should happen. This same concept applies directly to an online class.\n\nImagine you need to make a change in your class agenda for a project due tomorrow – where does that update need to appear? A syllabus schedule, due dates on folders, due dates on assignments, class calendars, and of course via email, etc. 
Chances are, you’ll miss making the update in one of those locations. Confusion and lots of emails with questions will ensue! (see [Tip 2](#tip-2-leverage-a-transparent-communication-tool)).\n\nThe concept of a [single source of truth](https://handbook.gitlab.com/handbook/values/#single-source-of-truth) (SSoT) that serves as a living record has many benefits in a classroom setting. Students need a SSoT in order to build trust, confidence, and be successful in a course, especially when they are used to the reassurance of seeing teachers several times a week. A SSoT also minimizes the number of questions about logistics and allows you to spend more time discussing the content itself.\n\n### How to adopt a single source of truth\n\n* Identify a tool (see [Tip 2](#tip-2-leverage-a-transparent-communication-tool)) that serves as the SSoT and document all relevant information such as due dates, schedules, directions, policies, etc. in this single location.\n* Avoid the temptation to list dates and policies on multiple documents such as calendars and assignments.\n* Update the SSoT as needed. As students ask questions, add the answers to the SSoT. This approach will save you time in answering questions down the road.\n* You will need to adjust as the cadence of the course develops, especially if this is your first time teaching it online.\n* Make sure students know that they will only need to look in this one location for any changes.\n\n## Tip 2: Leverage a [transparent communication tool](/company/culture/all-remote/remote-work-emergency-plan/#establish-a-handbook)\n\nIf you are put in the situation of having to migrate to remote quickly, you probably don’t have much time to invest in a complicated setup.  Don’t worry, you can implement this tip by starting simple.\n\nFirst things first, the tool should not be email. Email is one of the most inefficient methods of communication for remote work. 
You and your students are better served when information is shared out in a way that everyone has the same knowledge. Reducing email’s allure will save you and your students time and energy.\n\n### [What do we recommend instead?](/company/culture/all-remote/remote-work-emergency-plan/#establish-a-handbook)\n\n* A cloud-based word processor, such as Google Docs, is a great tool to get started with an SSoT. Ensure that you can easily update the document without downloading, uploading, and changing formats..\n* We recommend using a tool that allows for live editing, so making changes is very simple and easy.\n* Adding timestamps to track updates can also be helpful so students know they are looking at the most recent information.\n\n### What other tools do I need?\n\nBe sure to keep the [tech stack simple](/company/culture/all-remote/remote-work-emergency-plan/#minimize-your-tool-stack) and make sure everyone knows when to use which tool for each kind of communication.\n* For live meetings, we use [Zoom](https://zoom.us/) but whatever video conferencing tool your institution has will work.\n* For informal communication we use Slack, but there are other tools available such as Microsoft Teams.\n* If you need a more visual collaboration tool, consider using a tool such as [Mural](https://mural.co/).\n\nLet’s consider an example of how, when taken together, this approach can improve the experience for everyone in a remote environment whether teaching or learning.\n\n* A student asks a good question on the informal chat tool. You update the SSoT and direct the student to the answer there. Now students who may have the same question can see the response and know where to find the answer.\n* A student asks a question that is already in the SSoT. You direct the student to the correct link, thus minimizing the time it takes to answer the question.\n* A student asks a question that has been asked multiple times before. 
Private message the student, provide the SSoT link and suggest that he looks at the thread for answers in the future so he/she doesn’t need to wait for individualized responses.\n\n## Tip 3: Establish a [communication plan](/company/culture/all-remote/remote-work-emergency-plan/#establish-a-communications-plan)\n\nFor the first time in years, there’s no school bell ringing hourly or class schedule to keep everything on track! We recommend that you start by thinking about – and enjoying – **[asynchronous communication](/company/culture/all-remote/asynchronous/)** and then identify the [tool(s)](/company/culture/all-remote/remote-work-emergency-plan/#minimize-your-tool-stack) that you will use.\n\nFirst, let’s explore **asynchronous communication**. Working asynchronously removes the temptation to find a time that works for everyone and ensures that people who can’t make it to a specific event aren’t left out of the loop.\n\nIt is possible to strike a balance between providing key information in a self-service model while at the same time allowing for teams to ask questions and have discussions. Adopting a self-service model means that all content and relevant information is provided in a manner that students can easily find, read, and digest on their own ahead of time. With this approach, students can decide how much time to spend digging into the content according to their own needs and schedules.\n\nRecording a set of lectures ahead for an entire class, yet alone several classes' worth, is very daunting. 
Approaching lectures with an asynchronous communication style can help ease the burden on educators and at the same time provide effective mechanisms for discussion.\n\n### How can lectures be asynchronous?\n\n* [Have an editable agenda](/company/culture/all-remote/meetings/#have-an-agenda)  for the actual lecture discussion where students can post their name and a question in a numbered list.\n* Host a live video discussion where students voice their question(s) in the order on the agenda. If they aren’t present, read the question for them.\n* Answer the questions in the meeting and make sure someone is taking great notes. [Document everything](/company/culture/all-remote/meetings/#document-everything-live-yes-everything).  Try the [‘everyone is a moderator’](/handbook/communication/#everyone-is-a-moderator) concept to help run these meetings effectively.\n* Consider live-streaming the video directly to YouTube. This saves time and does not require you to download the recording, process it, and then upload back to your learning management system. The videos will be available on YouTube afterwards as well.\n* For more information, check out GitLab’s guide on [how to run remote meetings right](/company/culture/all-remote/meetings/#how-do-you-do-all-remote-meetings-right). You can also check a [recent example of a meeting livestream](https://www.youtube.com/watch?v=KMvrb0M3fFA) and an agenda to match (see below).\n\n![Agenda screenshot](https://about.gitlab.com/images/blogimages/group-convo-agenda.jpg){: .shadow.medium.center}\nA GitLab editable agenda after a meeting\n{: .note.text-center}\n\n## Tip 4: [Devote time to fostering relationships](/company/culture/all-remote/informal-communication/#devote-time-to-fostering-relationships)\n\nIn all-remote environments, there should be a greater emphasis placed on carving out time to get to know one another as humans. 
To connect and bond as empathetic beings with interests, emotions, fears, and hopes – people, not just colleagues or classmates. This tip is especially useful when transitioning from an in-person to an online setting. Your students are probably already a bit stressed, overwhelmed, and missing in-person, in-classroom connections.\n\n### How can you foster a sense of community with your online class?\n\n#### Try creating some fun channels in your online chat tool\n\nGitLab has channels that are all business as well as a set of channels for fun topics such as cooking, fitness, and dogs. People who have similar interests will connect and share experiences, photos, recipes etc. Students who connect over their puppies or a great recipe are more likely to help eachother out with questions or study together.\n\n#### Consider starting your video conference five minutes early with a conversational slide as a starter\n\nStudents arriving early can chit-chat just as they may have done in person. It might take a few meetings and some encouragement to get the ball rolling, but they’ll soon look forward to this opportunity to connect with classmates.\n\n![GitLab marketing team Show & Tell social call](https://about.gitlab.com/images/all-remote/marketing-social-call-show-and-tell.jpg){: .shadow.medium.center}\nA GitLab marketing team Show & Tell social call\n{: .note.text-center}\n\n#### Hold your office hours over a video conference\n\nStudents will be able to ask questions and have discussions, allowing them to build on the relationship with you and others they started in the in-person classroom.\n\n#### Try breakout groups\n\nThese are a great way to give students who may be less likely to speak up in a large group a chance to connect in a smaller setting.\n\n#### Consider hosting an **“Ask Me Anything”** meeting\n\nThese meetings are open times when students can ask a variety of questions. 
The questions could be anything from career advice, to sharing thoughts on research projects, course advising etc. It doesn’t have to be all business either.\n\n#### Encourage group conversation rather than 1:1 wherever possible\n\nThis helps to foster relations. We have some [guidelines that encourage collaboration through group communication](/handbook/communication/#avoid-direct-messages).\n\n#### There are some cases where you may need to discuss something 1:1 with a student\n\nWe recommend clearly outlining when to use group and private conversations in your SSoT.\n\nAdopting some of these strategies for remote teaching and learning is fairly easy. In our experience at GitLab, we find that team members enjoy and respect the independence this way of working affords them.  Students want to be engaged, and encouraging them to contribute by asking questions and taking collective notes themselves will allow them to contribute directly. Start small and go from there.\n\nWe hope this information helps make the transition a little bit easier and challenges some conventions in the long term! 
To learn more about the GitLab Education Program read our blog post [How to bring GitLab to a classroom near you](/blog/bring-gitlab-to-classroom-nearyou/).\n\nCover image by [Djurdjica Boskovic](https://unsplash.com/@escape_from_reality) on [Unsplash](https://unsplash.com/photos/G8_A4ZWxE3E)\n{: .note}\n",[677,9,832],{"slug":3310,"featured":6,"template":680},"going-remote-education-virtual-learning-tips","content:en-us:blog:going-remote-education-virtual-learning-tips.yml","Going Remote Education Virtual Learning Tips","en-us/blog/going-remote-education-virtual-learning-tips.yml","en-us/blog/going-remote-education-virtual-learning-tips",{"_path":3316,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3317,"content":3323,"config":3329,"_id":3331,"_type":14,"title":3332,"_source":16,"_file":3333,"_stem":3334,"_extension":19},"/en-us/blog/group-conversation-podcast",{"title":3318,"description":3319,"ogTitle":3318,"ogDescription":3319,"noIndex":6,"ogImage":3320,"ogUrl":3321,"ogSiteName":667,"ogType":668,"canonicalUrls":3321,"schema":3322},"How we turn our group conversations into a podcast with GitLab CI/CD","Want to listen to meetings on the go? 
Senior SRE John Jarvis explains how he turned his favorite remote meetings at GitLab into podcast format.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678626/Blog/Hero%20Images/group-conversation-podcast.jpg","https://about.gitlab.com/blog/group-conversation-podcast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we turn our group conversations into a podcast with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jarvis\"}],\n        \"datePublished\": \"2019-07-03\",\n      }",{"title":3318,"description":3319,"authors":3324,"heroImage":3320,"date":3326,"body":3327,"category":743,"tags":3328},[3325],"John Jarvis","2019-07-03","\n[Group conversations](/handbook/group-conversations/) are my favorite remote meetings at\nGitLab because they are a great way to get an inside peek at what different teams are doing,\nhow they collaborate, and what features you might find in future GitLab releases.\nYou may already know that we have been livestreaming these on\n[GitLab Unfiltered](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A) for anyone curious about how GitLab operates.\n\nLately, when I have time to listen to these unfiltered discussions I am either not at a screen or not in a place\nwhere it is easy to watch a video. 
After seeing how [Support turned their weekly meeting into a podcast](/blog/how-we-turned-40-person-meeting-into-a-podcast/),\nI thought it would be nice to make the GitLab group conversation meetings into a podcast as well!\n\n[Subscribe to the GitLab Group Conversations podcast](https://gitlab-com.gitlab.io/gl-infra/podcasts/#podcasts)\n{: .alert .alert-gitlab-purple .text-center}\n\nNow in addition to the livestreams and videos, there is a podcast feed for GitLab group conversations.\nListen to these conversations on your favorite podcast player by accessing the feed on\n[the Group Conversations podcast page](https://gitlab-com.gitlab.io/gl-infra/podcasts/#podcasts).\n\nIf you like the format, please let us know by tweeting us [@GitLab](https://twitter.com/gitlab)\nand we will consider adding more!\n\n### Here is a bit more detail about how these podcasts are generated\n\n* Teams that livestream group conversations\n  [follow instructions  for broadcasting it live](/handbook/group-conversations/#livestream-the-video)\n  and creating the video. When the meeting is over, the video is made available on GitLab Unfiltered.\n\n* A daily GitLab CI job in the [podcasts project](https://gitlab.com/gitlab-com/gl-infra/podcasts)\n  downloads the group conversation videos and converts them to audio files. 
It's easy to create [pipeline schedules in GitLab](https://docs.gitlab.com/ee/ci/pipelines/schedules.html).\n\n  ![The podcast schedule](https://about.gitlab.com/images/blogimages/podcast-schedule.png){: .shadow.medium.center}\n\n* An RSS feed is generated and audio files are uploaded to object storage from the CI job\n\n* GitLab pages is used to host a static site to link to the feed\n\n* This is all automated in a CI pipeline that runs every hour!\n\n![Podcast pipelines](https://about.gitlab.com/images/blogimages/podcast-pipeline.png){: .shadow.medium.center}\n\nI hope you have the opportunity to tune into the group conversations at GitLab and\nalso take advantage of GitLab CI features like schedules to help automate your own\nworkflows!\n\nPhoto by [Lee Campbell](https://unsplash.com/@leecampbell?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/headphones?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,832,109],{"slug":3330,"featured":6,"template":680},"group-conversation-podcast","content:en-us:blog:group-conversation-podcast.yml","Group Conversation Podcast","en-us/blog/group-conversation-podcast.yml","en-us/blog/group-conversation-podcast",{"_path":3336,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3337,"content":3343,"config":3348,"_id":3350,"_type":14,"title":3351,"_source":16,"_file":3352,"_stem":3353,"_extension":19},"/en-us/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you",{"title":3338,"description":3339,"ogTitle":3338,"ogDescription":3339,"noIndex":6,"ogImage":3340,"ogUrl":3341,"ogSiteName":667,"ogType":668,"canonicalUrls":3341,"schema":3342},"Hey, data teams - We're working on a tool just for you","Meltano is an open source tool for the entire data science lifecycle, and we want your contributions and feature 
requests!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678726/Blog/Hero%20Images/hey-data-analysts-we-are-working-on-a-tool-just-for-you.jpg","https://about.gitlab.com/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Hey, data teams - We're working on a tool just for you\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2018-08-01\",\n      }",{"title":3338,"description":3339,"authors":3344,"heroImage":3340,"date":3345,"body":3346,"category":299,"tags":3347},[3134],"2018-08-01","\nGitLab as a company faces a challenge shared by many — we have lots of data for our engineering organization (via GitLab, our single data store for that part of the company), but there are key gaps in how we understand the effectiveness of business operations. [Meltano](https://gitlab.com/meltano/meltano/tree/master) was created to help fill the gaps by expanding the common data store to support Customer Success, Customer Support, Product teams, and Sales and Marketing.\n\n### What is Meltano?\n\nMeltano aims to be a complete solution for data teams — the name stands for model, extract, load, transform, analyze, notebook, orchestrate — in other words, the data science lifecycle. While this might sound familiar if you're already a fan of GitLab, Meltano is a separate product. 
Rather than wrapping Meltano into GitLab, Meltano will be the complete package for data people, whereas GitLab is the complete package for software developers.\n\n### What problem does it solve?\n\nThe GitLab Data and Analytics team is [charged](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/source/job-families/finance/manager-data-and-analytics/index.html.md) with getting data from our external sources, presenting it in a usable format to business users across the company, and eventually making predictions from the data. As is the case with many data teams, we currently do this with a series of steps and separate tools, and we're not yet at the level of process and stability that is commonplace in software development. The idea of bringing best practices from software development to data analytics is a huge draw for the Data team at GitLab. Ideally, all of our work could be done in open source tools, and could be version controlled, and we’d be able to track the state of the analytics pipeline from raw data to visualization.\n\nThe endgame for Meltano involves making analytics accessible to everyone, not just professional data wranglers. GitLab Data Analyst Emilie Burke explains a common scenario: \"There are whole swathes of small and medium size companies that don’t really have data and analytics because they don’t have engineers on their team. The reports that they get are through whatever tools they are using. When they’re dependent on these siloed data sources, you can’t track cross-functional efforts. For example, if you’re doing a giveaway, you might see a bunch of new email signups piping into Mailchimp. But you won't be able to see if those users are then buying things in Shopify. Unless there's a native integration, you can’t relate that data to any other data source.\"\n\nManaging the integrations you currently have comes with its own challenges. 
Senior Product Manager Joshua Lambert shares, \"The difficulty of hooking up Salesforce and Marketo to see if a marketing campaign was successful is non-trivial. Often money is spent and the question is, 'Was it worth it?'\" As an open source tool, we think Meltano will make a big difference for teams without much money to invest in data analytics. It's a new field for many organizations, and we want to do everything we can to make it easier for teams and business to access their data and make better decisions. We talked more about this during a recent Q&A, which you can watch below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/nIYMNIvKLcY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### How can I contribute?\n\nMeltano is open source! You can check out the plan for an MVC [here](https://gitlab.com/meltano/meltano/issues/10). There are many different areas where people can contribute, including Meltano Analysis (the UI), Extractors, and Loaders. Meltano currently only supports Postgres (with Snowflake on the way!), but will need to support many different database types, so any contributions writing Loaders for one of those would be very welcome. You can make requests by opening an [issue](https://gitlab.com/meltano/meltano/issues/) and labeling it `feature request`.\n\nReaders are also extremely welcome to check out the [Data team's work](https://gitlab.com/meltano/analytics/) and suggest ways we can improve! We know some aspects of how we do analytics and data science are not where they should be. If you don’t think we’re using the right primitives or going about something the wrong way, we’re all ears!\n\n### How can I keep up with the Data Team and Meltano?\n\nThe best way to get in touch about Meltano or the Data team is to [open an issue](https://gitlab.com/meltano/meltano/issues/)! 
We also publish all of our team calls and working sessions on our brand new [YouTube channel](https://www.youtube.com/channel/UCmp7zJAZEC7I_n9BEydH8XQ), and you can learn more about the team, view our work in GitLab, and follow us on social:\n\n* [Jacob Schatz](https://about.gitlab.com/company/team/#jakecodes), Staff Developer, Meltano\n* [Yannis Roussos](https://about.gitlab.com/company/team/#iroussos), Senior Developer, Meltano Specialist\n* [Alex Zamai](https://about.gitlab.com/company/team/#AlexZamai), Developer, Meltano\n* [Micaël Bergeron](https://about.gitlab.com/company/team/#micaelbergeron), Developer, Meltano\n* [Joshua Lambert](https://about.gitlab.com/company/team/#joshlambert), Senior Product Manager, Package, Monitor, Distribution\n* [Taylor A. Murphy, PhD](https://about.gitlab.com/company/team/#tayloramurphy1), Manager, Data and Analytics\n* [Emilie Schario](https://gitlab.com/emilie), Data Analyst\n* [Thomas La Piana](https://gitlab.com/tlapiana), Data Engineer\n* [Chase Wright](https://about.gitlab.com/company/team/#thechasewright), Finance Operations and Planning\n\n_[Emily von Hoffmann](/company/team/#emvonhoffmann) contributed to this post._\n\nPhoto by [Jefferson Santos](https://unsplash.com/photos/9SoCnyQmkzI) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9],{"slug":3349,"featured":6,"template":680},"hey-data-teams-we-are-working-on-a-tool-just-for-you","content:en-us:blog:hey-data-teams-we-are-working-on-a-tool-just-for-you.yml","Hey Data Teams We Are Working On A Tool Just For 
You","en-us/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you.yml","en-us/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you",{"_path":3355,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3356,"content":3362,"config":3368,"_id":3370,"_type":14,"title":3371,"_source":16,"_file":3372,"_stem":3373,"_extension":19},"/en-us/blog/high-availability-git-storage-with-praefect",{"title":3357,"description":3358,"ogTitle":3357,"ogDescription":3358,"noIndex":6,"ogImage":3359,"ogUrl":3360,"ogSiteName":667,"ogType":668,"canonicalUrls":3360,"schema":3361},"Meet Praefect: The traffic manager making your Git data highly available","This router and transaction manager ensures there are multiple copies of each Git repository available in the event of an outage – no NFS required.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669204/Blog/Hero%20Images/traffic-intersection.jpg","https://about.gitlab.com/blog/high-availability-git-storage-with-praefect","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet Praefect: The traffic manager making your Git data highly available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Zeger-Jan van de Weg\"}],\n        \"datePublished\": \"2021-01-21\",\n      }",{"title":3357,"description":3358,"authors":3363,"heroImage":3359,"date":3365,"body":3366,"category":743,"tags":3367},[3364],"Zeger-Jan van de Weg","2021-01-21","\nAs critical software projects grow, scaling infrastructure to make the service [highly available](https://en.wikipedia.org/wiki/High_availability) is key. At GitLab, our biggest struggle in scaling was right in our name: Git.\n\n## The trouble with scaling Git\n\nGit is software that is distributed, but not usually run in a ‘highly available cluster,’ which is what GitLab needs. 
At first, we solved this with a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions), NFS – which exposes a shared filesystem across multiple machines and generally worked. As we’d soon find out, most NFS appliances were for bulk storage and not fast enough. This led to problems with GitLab’s Git access being slow.\n\nTo solve the speed problem we built [Gitaly, our service that provides high-level RPC access to Git repositories](https://docs.gitlab.com/ee/administration/gitaly/). \n\nWhen we started with [Gitaly v1.0](/blog/the-road-to-gitaly-1-0/), our goal was to remove the need for a network-attached filesystem access for Git data. When that was complete, the next problem to tackle was that all your data is only stored once. So, if you have a server down, or your hard disk dies, or something happens to this one copy, you're in deep trouble until a backup is restored. This is an issue for GitLab.com, but it’s also a big risk for our customers and community.\n\nBack at our [Summit in Cape Town](/company/culture/contribute/previous/#summit-in-cape-town-south-africa) in 2018, the Gitaly team (at the time, that was [Jacob Vosmaer](/company/team/?department=all#jacobvosmaer-gitlab) and me) and some other engineers discussed pursuing a fault-tolerant, highly available system for Git data. For about a month we went back and forth about how we would go about it – ranging from wild ideas to smaller iterations towards what we want. The challenge here was that the ultimate aim is always going to be 100% availability, but you’re never going to make that. So let's aim for a lot of nines (three nines being 99.9%, five being 99.999%, etc.) Ideally, we'd be able to iterate to 10 nines if we wanted to. 
\n\nEventually we chose the design of a proxy: introduce a new component in the GitLab architecture, which is Praefect, and then route all the traffic through it to Gitaly storage nodes to provide a [Gitaly Cluster](https://docs.gitlab.com/ee/administration/gitaly/praefect.html). Praefect inspects the request and tries to route it to the right Gitaly backend, checks that Gitaly is up, makes sure the copies of your data are up to date, and so on. \n\n## First iteration: Eventual consistency\n\nTo cut the scope, for our first iterations we settled on eventual consistency, which is fairly common – we even use it for some GitLab features. With Git data, if we are behind a minute, it's not a big deal because at GitLab at least 90% of operations on our Git data are just reads, compared to a very small volume of writes. If I run `git pull` and I'm one commit behind master, that's not ideal, but not a deal breaker in most cases. \n\nWith eventual consistency, each repository gets three copies: one primary and two secondary. We replicate your data from the primary to the other copies, so that if your primary is inaccessible, we can at least give you read access to the secondary copies until we recover the primary. There’s a chance the secondaries are one or two commits behind your primary, but it’s better than no access.\n\nWe rolled this out in [13.0](/releases/2020/05/22/gitlab-13-0-released/#gitaly-cluster-for-high-availability-git-storage) as generally available. \n\n## Strong consistency\n\nThe next stage was to work on strong consistency, where all of your three copies are always up to date. \n\nWhen you write to your Git repository, there’s a moment where Praefect says, “OK, I'm going to update branch A from #abc to #cbd.” If all three copies agree on the updates, then Praefect tells everyone to apply this update and now, almost at the same moment in time, they'll update the data to the same thing. 
Now you've got three copies that are up to date.\n\nSo, if one copy is offline for some reason – let’s say a network partition, or the disk is corrupted – we can serve from the other two copies. Then the data remains available, and you have more time to recover the third copy as an admin. Effectively, while you always have a designated primary, it's actually more like having _three_ primaries, because they are all in the same state. \n\nIf the default state of a system is consistent it requires maintaining this consistency on each mutation to the data that's performed. All possible requests to Gitaly are grouped into two classes: mutators and accessors. Meaning that there was a risk we had to migrate each mutator RPC individually. That would've been a major effort, and if possible, we wanted to push this problem to Git. Gitaly uses Git for the majority of write operations, and was thus the largest common denominator.\n\nSo Git had to become aware of transactions, which ideally isn't part of Git. There are more areas where it would be nice if Git was aware of business logic, but if we're honest with ourselves, it's not really Git's concern: authentication and authorization. At GitLab we use [Git Hooks](https://git-scm.com/docs/githooks.html#_hooks) for that. So the idea [applied and contributed](https://public-inbox.org/git/1de96b96e3448c8f7e7974f7c082fd08d2d14e96.1592475610.git.ps@pks.im/T/#m9ae42f583968aa1d8ca43bd3007333cf51a618cc) (thanks, [Patrick Steinhardt](/company/team/#pks-gitlab)!) was the same: when events happen with Git, execute a hook and allow Gitaly to execute business logic. Through the exit code of the hook, Git is signaled on how to proceed. In Git, these events are updates of any reference (for example, branches or tags). When this happens Git will then allow Gitaly to participate in a [three-phase commit](https://en.wikipedia.org/wiki/Three-phase_commit_protocol) transaction by communicating back to Praefect, and enforce consistency. 
So we got that released in Git, fixed a bug, and now we’re [rolling it out to almost all write requests](https://gitlab.com/gitlab-org/git/-/issues/79).\n\n## A defensible cost increase\n\nNow strong consistency is great, but we are effectively asking our customers, “Instead of one copy, why don't you triple your storage costs and your server costs and whatnot, and you have zero benefits unless something goes wrong.” That wasn't really appealing for most customers, but now we’ve sweetened the deal with increased performance and making the cost increase more manageable. \n\nSo, if you have three copies of your data that are up to date, then all of them could serve any request that doesn't mutate the data, right? Because you know they're up to date. Right now, [Pavlo](/company/team/?department=gitaly-team#8bitlife) is working on [read distribution, which we are making generally available in 13.8](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/71960) (coming Jan. 22, 2021). [We rolled it out briefly before](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/58694), but it didn’t scale as expected, so we’ve worked with QA to mitigate that.\n\nRight now, Praefect is rolled out to a very limited subset of projects on GitLab.com, because running it is expensive already. When I first proposed rolling it out for everyone, it was very quick to calculate that that will triple our Gitaly Clusters – not within the budget at all! So we're trying to iterate towards that goal. The first step is to work on allowing a [variable replication factor](https://docs.gitlab.com/ee/administration/gitaly/praefect.html#variable-replication-factor). 
It can be expensive to store a lot of data multiple times, so why don't we make it so that you can store some repositories three times and some just one time, and you don't get the guarantees and the availability of those with three copies.\n\n## Challenges and lessons learned\n\nSo we have Praefect, this new component, but it's not installed by default on GitLab Omnibus –\nyou have to enable it yourself. The [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) uses it as well as the tests on GitLab.com, for GitLab projects, but that wasn’t always the case. When you have an optional part in your architecture, if you’re debugging or talking with customers, there is the additional mental burden of verifying what the architecture looks like. Without it, you can make much quicker assumptions on what's going on and why it's working or why it isn't. Officially, we have deprecated NFS, so it makes sense to make it a required component so we can depend on it being there.\n\nAlso, as we add more features to Praefect, if it’s still optional then some customers get those added benefits and some don’t.\n\n### We should have put it in production sooner\n\nOur first iteration was just proxying the traffic, doing nothing with it, and verifying that it works. We didn't put it in production because it offered nothing to the community. But, it includes new components in your architecture, which our SREs need to know about, and there were a couple of bugs we found out much later. I was hesitant to put something in production that didn't offer anything in return, but if we’d been a little more aggressive with putting it out there – even just for a small subset of projects – we would understand more quickly what we're running, what was working, and what wasn't. \n\n### Applying big architectural changes takes time\n\nIf you ask customers to make giant architectural changes, it's going to take longer than you think. 
When we released Praefect and Gitaly Clusters in 13.0, it was fairly rough around the edges and some things weren't working as you would expect, but it was a good time to release because now, six months later, we see customers finally starting to implement it. They want to validate, try it out on a subset, and then finally roll it out for their whole GitLab instance. While that took longer than I expected, it's cool to see the numbers going up now, and adoption is growing quite rapidly.\n\n## More than just a traffic manager\n\nPraefect does much more than just inspect the traffic. If Gitaly goes down, ideally you want to notice that before you actually fire a request, which Praefect does. It does failover, so if one fails and it was designated as a primary, then it fails over to a secondary, which is now designated as a primary. \n\nI'm really excited for the next few years and the kind of things we are planning to build in Praefect and what that will deliver to GitLab.com and our customers and community. Where before we didn’t have very granular control over what we were doing or why we were doing it, now we can intercept and optimize.\n\n## What's next\n\nWe're shipping [HA Distributed Reads](https://gitlab.com/gitlab-org/gitaly/-/issues/3334) in GitLab 13.8 (Jan. 22, 2021). For 13.9, we're shooting for [strong consistency in the Gitaly Cluster](https://gitlab.com/groups/gitlab-org/-/epics/1189) and [variable replication factor](https://gitlab.com/groups/gitlab-org/-/epics/3372).\n\nFor GitLab self-managed users, consider enabling Praefect if you have high availability requirements. 
Visit our [Gitaly Clusters documentation](https://docs.gitlab.com/ee/administration/gitaly/praefect.html) to get started.\n\n_Major thanks to [Rebecca Dodd](/company/team#rebecca) who contributed to this post._\n\nCover image by [Yoel J Gonzalez](https://unsplash.com/@yoeljgonzalez?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText\") on [Unsplash](https://unsplash.com/s/photos/traffic?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText)\n{: .note}\n",[722,1297,9,231,745],{"slug":3369,"featured":6,"template":680},"high-availability-git-storage-with-praefect","content:en-us:blog:high-availability-git-storage-with-praefect.yml","High Availability Git Storage With Praefect","en-us/blog/high-availability-git-storage-with-praefect.yml","en-us/blog/high-availability-git-storage-with-praefect",{"_path":3375,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3376,"content":3382,"config":3388,"_id":3390,"_type":14,"title":3391,"_source":16,"_file":3392,"_stem":3393,"_extension":19},"/en-us/blog/high-efficiency-innovation",{"title":3377,"description":3378,"ogTitle":3377,"ogDescription":3378,"noIndex":6,"ogImage":3379,"ogUrl":3380,"ogSiteName":667,"ogType":668,"canonicalUrls":3380,"schema":3381},"High-efficiency innovation: 3 lessons to learn from GitLab's culture of rapid execution","Guest author Jay Newman recently shadowed our CEO to discover how we move so quickly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680169/Blog/Hero%20Images/high-efficiency-innovation.jpg","https://about.gitlab.com/blog/high-efficiency-innovation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"High-efficiency innovation: 3 lessons to learn from GitLab's culture of rapid execution\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jay Newman\"}],\n        \"datePublished\": \"2018-03-27\",\n      
}",{"title":3377,"description":3378,"authors":3383,"heroImage":3379,"date":3385,"body":3386,"category":808,"tags":3387},[3384],"Jay Newman","2018-03-27","\n\nAll companies have different ways of creating new products and services. Despite that, there are a few patterns that show up consistently. At [Jump](http://www.jumpassociates.com), we like to call those patterns the different \"cultures\" of innovation. One such pattern has to do with execution. Great executors (like GE and FedEx) are masters of sharp focus and efficient machine-making.\n\nMany of the Fortune 500 companies that we work with do their best innovation this way. They've built infrastructure that excels at launching products globally, coordinating thousands of employees and operating at massive scale. These companies often ask us what they can learn from what's going on in Silicon Valley. There's much to learn, of course, from the startups and entrepreneurial ecosystem here.  \n\nThe important question is not \"How do they do things in Silicon Valley?\" Instead, it's \"What can I learn that would work well in my organization?\" It's always exciting to come across a startup that's doing what these big companies do best – execute at scale – and doing it in a completely different way.\n\nGitLab is one such company. They're an open source software company powering many of the world's largest corporations. They've developed a surprising – and strong – culture of innovation. They're a remote-only company. There's no physical headquarters or office space for their 200+ employees located worldwide. They proudly admit that they value \"boring solutions.\" Their [entire business strategy is available](/company/strategy/) for the public and their competitors to see. 
They're respected for their [product](/blog/gitlab-leader-continuous-integration-forrester-wave/), their [culture](/company/culture/), and their [results](http://www.businessinsider.com/gitlab-raises-20-million-from-gv-2017-10).\n\nMany companies pride themselves on their ability to iterate quickly and answer yes/no decisions rapidly. Even they might be surprised at the scope and scale of GitLab's efficiency. GitLab drives high-efficiency innovation through a culture of rapid execution. They weave speed directly into the fabric of who they are and what they do. Do you want to learn how they do it? I recently shadowed GitLab's CEO, [Sid Sijbrandij](/company/team/#sytses), and his team for a day.\n\nHere's how they make it happen.\n\n## When the answer is clear, build for speed. Speed wins.\n\n*Why build a culture of rapid execution?*\n\nWith such a unique team culture and set of business practices, the first thing I wanted to learn from Sid was why GitLab operates the way it does. What became clear was that it's all very intentional.\n\nA few key beliefs are central to the decisions they've made:\n\n### Belief 1: The solution required to win is already super clear to everyone.\n\nThey're operating in a market called DevOps, which is about creating platforms and tools for software developers to use in their work. It's a market where both the unmet customer need and the ideal solution are clear to everyone.\n\nThey were newer to the game than some brand name and legacy competitors, so they chose to prioritize speed over invention to get to the finish line first.\n\n### Belief 2: If you don't do anything new, you can do things faster, bigger and better.\n\nThe folks at GitLab believe that it's better to be boring. They value \"boring solutions.\" It's not because boring is better in and of itself. It's because boring is efficient. It's faster. And faster can become bigger. 
And when you add in collaboration with a global open source community, bigger can become better.\n\nIf there's a market standard, they don't try to create something different. They get on board. As Sid says, \"It's about convention over conviction. We make sure everyone [in the open source community] is enticed to participate. If the rest of the world is doing it in some way, we should be doing it in that way.\"\n\n### Belief 3: It's OK not to make everyone happy.\n\nIt's hard for most companies – and most people – to change to what made them successful in the first place. For GitLab, making those kinds of changes is critical to achieving the growth they seek. So on a daily basis, they choose to act quickly, make mistakes quickly, and learn from those mistakes quickly.\n\nThat can lead to decisions – big and small – that might not make everyone happy.\n\nWhen they launch a completely new version of GitLab (they're on version [10.6](/releases/2018/03/22/gitlab-10-6-released/) right now), they always add some things that will frustrate some existing customers, and they often take away things that other customers love.\n\n\"There's way more people not using GitLab than that are. So we should always optimize for those future customers, not your current ones. That's why companies slow down. They start listening. Engineers want to fix the current bugs. Sales wants to keep the old deck that works for them. You start listening to your customers and what they need you to maintain or fix. The natural motion of any company is to slow down. So as CEO you need to get the company beyond that.\"\n\nSo what does high-efficiency innovation and rapid execution look like at GitLab?\n\nHere are a few examples of the pace at which they operate:\n\n1. They release a new version of GitLab every single month.\n1. Everything is in draft and subject to change. It's always under construction.\n1. They don't repeat themselves. GitLab documents how it does things in a [handbook](/handbook/). 
It's 1,000 pages long. If it's in the handbook, don't repeat it.\n1. Every conference call starts on time. No wasted minutes. Sid checks 15-30 action items off the list in each of his 25-minute 1-on-1 meetings.\n1. They trust their team to multi-task appropriately. If you want to check email during a meeting, it's probably more important than the meeting is to you.\n\nThere's a final, often-overlooked value of speed: it's exciting. Workplaces that manage to pair speed with evident progress allow their teams to feel accomplished, motivated, and on the edge of their seats. It's an easy hack for maintaining employee engagement.\n\n## Don't sacrifice long-term vision for short-term speed. Be accountable for both.\n\n*What is GitLab rapidly executing on?*\n\nMany companies who prize execution do a great job at sustaining and growing their existing products. They're often quite efficient – though they could learn something from the speed at which GitLab operates. But they're more likely to struggle with thinking far out into the future.\n\nTo paraphrase Stephen Covey, there's a big difference between efficiency and effectiveness. A jet flying 1,000 miles per hour is efficient; a jet flying 1,000 miles per hour in the right direction is effective.\n\n#### So if GitLab as an organization is a jet built for speed – where is it going?\n\nSid wants GitLab to help multiply the potential for progress that humanity can drive into the world. \"Our mission is 'Everyone can contribute.' That's a long-term vision. That's 10 years. It means changing all of our culture to read-write. Think Wikipedia. They allow everyone to contribute. Imagine if we can do that. You release a lot of progress. You 10x the progress. [Multipliers like that are] thrown around so easily in Silicon Valley that you have to be cautious. But if you look at 100,000 companies using GitLab, and really being able to get their software out faster. 
I'm willing to stand behind that.\"\n\nThat means that not only is GitLab thinking about efficiency and effectiveness, but it's also thinking about impact. Impact on the scale of human progress and global culture.\n\nThat's pretty big and pretty far out. So how do they make sure the pilots keep looking way out there on the horizon while flying at supersonic speeds and maneuvering around today's obstacles?\n\nFirst, you set the mission and vision. Everything starts with that mission in mind. Everyone knows it, and Sid talks about it [every chance he gets](https://blog.ycombinator.com/gitlab-distributed-startup/).\n\nNext, you draw that vision back into today's actions with cascading plans. Create a three-to-five-year strategy about how to get there. Craft a yearly plan and [product vision](/blog/gitlabs-2018-product-vision/) – one that's concrete enough that you could show screenshots of what it will look like a year from now. Define quarterly goals (GitLab's [OKRs](/company/okrs/) are public), monthly targets, and smaller sprints to get you there.  \n\nThird, you make each of these regular goals highly ambitious, close-in, unambiguous, and concrete. \"Setting high goals pushes people beyond their comfort zone,\" Sid told me. At Y Combinator, he says they taught GitLab that \"20 percent is the new 10 percent.\" That's 20 percent growth, every single week. It's a high number, and it forces them to make completely different types of decisions.\n\nFinally, because the short-term goals are incredibly high, you focus on iteration. [Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is one of GitLab's core values. They define it clearly: \"We do the smallest possible thing and get it out as quickly as possible.\" And they don't just ask developers and designers to work this way. \"We put the whole company on that diet. It made sense for the product. But for marketing, sales, etc., we've gotten them there. 
If you say 'Grow XYZ in the next two weeks,' you do completely different things. I don't know why that is, but you do.\"\n\n### Encode culture and values to keep the company moving faster.\n\n*How does GitLab do what they do?*\n\nIt was GitLab's strong culture and values orientation that first drew me to them as an organization. I'm often on the lookout for how leaders drive values through their organizations – from Jon Stewart on \"The Daily Show\" to the frontline teams at Starbucks and Zappos.\n\nThe best values-oriented organizations draw explicit links between their values, their competitive advantages, and their daily activities.\n\nHere's where GitLab stands out.\n\nIn just one day of shadowing GitLab's staff, the team talked about values during a product meeting, two interviews with prospective employees, an analyst call and a 1-on-1 with a teammate. The whole team is drawing causal links between what it does (its business activities) and how it does them (the values they live by).\n\n>The whole team is drawing causal links between what it does (its business activities) and how it does them (the values they live by).\n\nSo how does that work? It requires leaders choosing to identify not just the values that matter, but also how to organize around them. Sid told us \"I didn't do a very good job coding GitLab [when he and his co-founders all started back in 2011]. But I think I'm doing a good job coding GitLab the company.\"\n\nAs a remote-only company, \"coding the company\" means (1) writing things down, (2) referencing back to what's been written and (3) reinforcing it through rewards.\n\nAll of this \"GitLab the company\" code is captured in its handbook. The handbook is referenced in almost every conversation. The handbook consists of over 1,000 pages of text. 
It's a tool that GitLab uses to capture and detail out decisions that have already been made about all of its core business practices – marketing, sales, product, team operations, finance, and more. It's a constant practice for Sid and the team to reference the handbook in meetings, and to send people to look there first before continuing the conversation.\n\nThe values take a prime place in the handbook. There, values are defined, not just described. Words can mean different things in different contexts – and these values indicate a particular thing at GitLab. The definitions are brought to life with 5-15 concrete actions that employees often take for each of the six values. As Sid says, \"The culture got stronger because it is written down. And because it improves and is edited over time.\" And then they're reinforced every day through hiring, coaching, performance reviews and casual conversations.\n\nIt's rare that companies think about linking their values with their competitive advantage. It's rarer still that a company brings its values to life through the day-to-day work. What GitLab has unlocked with its values orientation is not just good and meaningful work. It has also opened the most important competitive advantage in its business model – speed.\n\n>It's rare that companies think about linking their values with their competitive advantage. It's rarer still that a company brings its values to life through the day-to-day work.\n\nIt says it right there in the 'Why have values' section of the handbook: \"Values are a framework for distributed decision-making; they allow you to determine what to do without asking your manager.\" By encoding values deep into everyday activities of the company, everyone on GitLab's team can make decisions faster.\n\nIn DevOps, winning is about getting there first. 
GitLab coded values right into its organizational design to make sure it could always be the fastest to market.\n\n## Parting thoughts: Will high-efficiency innovation work for you?\n\nAlthough they weren't thinking about large corporations, the oracles of Delphi were right. The most important maxim is to \"know thyself.\" The GitLab prescription isn't right for every company. What's most important is to build a culture of innovation that reflects your strengths and your values.\n\nGitLab is a company of executors, of coders and of people who aren't afraid to work out in the open and make mistakes. They see clear problems. Then they attack. GitLab built a method of innovation that works well for them, but it's not a one-size-fits-all approach. It won't work for everyone, but it might work for you.\n\n#### Here are the questions you should ask:\n\n1. Is the problem you're facing clear to you and your competitors?\n1. Would the people on your team prioritize efficiency over novelty if it'll get you there first?\n1. Do you know how to make trade-offs between what works for your existing customers and what might work better for future customers?\n\nIf you answered yes, pay close attention to what GitLab is doing. Their unrelentingly quick iterative process might be just what the doctor ordered to scale your innovation.\n\nIf not, the GitLab system isn't the right fit for you. You'll want to organize your innovation in a different way.\n\nAs one example, we built Jump to handle an entirely different type of [highly ambiguous problems](https://www.forbes.com/sites/brucerogers/2018/01/25/innovation-leaders-dev-patnaik-co-founder-and-ceo-jump-associates/3/#42518f211238). 
So it makes sense that some of Jump's values (Passion, Curiosity, Enthusiasm, Intention, Acuity, Initiative and Play) look very much the opposite of GitLab's values (Collaboration, Results, Efficiency, Diversity, Iteration and Transparency).\n\nJump and GitLab are both deeply values-oriented companies with rich and collaborative cultures focused on innovation. And yet we value different things, have different org structures, hire different types of people and work on very different types of problems.\n\nSo what if you're like me and your company's approach or market situation is quite different than GitLab's? Take this as an opportunity to learn from seeing your mirror image.\n\nFirst, test parts of their approach. See what works for you and your team. Then, consider the polar opposites. Find the points where you value distinctly different things, and ask why. Learn why their method works for them, and why it wouldn't work for you. Then flip the script – what's an approach to innovation that GitLab would never do that would be a difference maker for you if you did it?\n\nEither way, take note of what GitLab is doing and how they're doing it. It's amazing, effective, growing like crazy and a great place to work. And ask yourself – should my team be innovating like that?\n\n## About the guest author\n\nJay Newman is Director of Strategy at Jump Associates, a leading strategy and innovation firm. 
Learn more at [jumpassociates.com](http://www.jumpassociates.com) and connect directly with Jay on [LinkedIn](https://www.linkedin.com/in/jaynewman1).\n\nPhoto by [Karsten Würth](https://unsplash.com/photos/ZKWgoRUYuMk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,811,723],{"slug":3389,"featured":6,"template":680},"high-efficiency-innovation","content:en-us:blog:high-efficiency-innovation.yml","High Efficiency Innovation","en-us/blog/high-efficiency-innovation.yml","en-us/blog/high-efficiency-innovation",{"_path":3395,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3396,"content":3401,"config":3406,"_id":3408,"_type":14,"title":3409,"_source":16,"_file":3410,"_stem":3411,"_extension":19},"/en-us/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x",{"title":3397,"description":3398,"ogTitle":3397,"ogDescription":3398,"noIndex":6,"ogImage":1452,"ogUrl":3399,"ogSiteName":667,"ogType":668,"canonicalUrls":3399,"schema":3400},"How a fix in Go 1.9 sped up our Gitaly service by 30x","After noticing a worrying pattern in Gitaly's performance, we uncovered an issue with fork locking affecting virtual memory size. Here's how we figured out the problem and how to fix it.","https://about.gitlab.com/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How a fix in Go 1.9 sped up our Gitaly service by 30x\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"}],\n        \"datePublished\": \"2018-01-23\",\n      }",{"title":3397,"description":3398,"authors":3402,"heroImage":1452,"date":3403,"body":3404,"category":743,"tags":3405},[2511],"2018-01-23","\n\n[Gitaly](https://gitlab.com/gitlab-org/gitaly) is a Git RPC service that we are currently rolling out\nacross GitLab.com, to replace our legacy NFS-based file-sharing solution. 
We expect it to be faster, more stable\nand the basis for amazing new features in the future.\n\nWe're still in the process of porting Git operations to Gitaly, but the service has been\nrunning in production on GitLab.com for about nine months, and currently peaks at about 1,000\n[gRPC](https://grpc.io/) requests per second. We expect the migration effort to be completed\nby the beginning of April at which point all Git operations in the GitLab application will\nuse the service and we'll be able to decommission NFS infrastructure.\n\n\u003C!-- more -->\n\n## Worrying performance improvements\n\nThe first time we realized that something might be wrong was shortly after we'd finished deploying a new release.\n\nWe were monitoring the performance of one of the gRPC endpoints for the Gitaly service and noticed that the\n99th percentile performance of the endpoint had dropped from 400ms down to 100ms.\n\n![400ms to 100ms latency drop](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-01.png){: .shadow.center}\nLatencies drop from 400ms to 100ms after a deploy, for no good reason\n{: .note .text-center}\n\nThis should have been fantastic news, but it wasn't. There were no changes that should have led to faster\nresponse times. We hadn't optimized anything in that release; we hadn't changed the runtime and the new\nrelease was using the same version of Git.\n\nEverything _should have_ been exactly the same.\n\nWe started digging into the data a little more and quickly realised that 400ms is a very high latency for\nan operation that simply confirms the existence of a [Git reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References).\n\nHow long had it been this way? 
Well it started about 24 hours after the previous deployment.\n\n![100ms to 400ms latency hike](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-02.png){: .shadow.center}\nLatencies rising over a 24 hour period following a deployment, for no good reason\n{: .note .text-center}\n\nWhen browsing our Prometheus performance data, it quickly became apparent that this pattern was being repeated with each\ndeployment: things would start fast and gradually slow down. This was occurring across all endpoints. It had been this way for a while.\n\nThe first assumption was that there was some sort of resource leak in the application, causing the host to slow\ndown over time. Unfortunately the data didn't back this up. CPU usage of the Gitaly service did increase, but the\nhosts still had lots of capacity.\n\n![Gitaly CPU charts](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-03.png){: .shadow.center}\nGitaly CPU increasing with process age, but not enough to explain the problem\n{: .note .text-center}\n\nAt this point, we still didn't have any good leads as to the cause of the problem, so we decided to further\nimprove the observability of the application by adding [pprof profiling support](https://golang.org/pkg/net/http/pprof/)\nand [cAdvisor](https://github.com/google/cadvisor) metrics.\n\n## Profiling\n\nAdding pprof support to a Go process is [very easy](https://gitlab.com/gitlab-org/gitaly/merge_requests/442).\nThe process already has a Prometheus listener and we added a pprof handler on the same listener.\n\nSince production teams would need to be able to perform the profiling without our assistance, we\nalso [added a runbook](https://gitlab.com/gitlab-com/runbooks/blob/master/howto/gitaly-profiling.md).\n\nGo's pprof support is easy to use and in our testing, we found that the overhead it\nadded to production workloads was negligible, meaning we could use it in 
production without concern\nabout the impact it would have on site performance.\n\n## cAdvisor\n\nThe Gitaly service spawns Git child processes for many of its endpoints. Unfortunately these Git\nchild processes don't have the same instrumentation as the parent process so it was\ndifficult to tell if they were contributing to the problem. (Note: we record [`getrlimit(2)`](http://man7.org/linux/man-pages/man2/getrlimit.2.html) metrics for Git processes but cannot observe grandchild processes spawned by Git, which often do much of the heavy lifting)\n\nOn GitLab.com, Gitaly is managed through systemd, which will automatically create a cgroup for\neach service it manages.\n\nThis means that Gitaly and its child processes are contained within a single cgroup, which we\ncould monitor with [cAdvisor](https://github.com/google/cadvisor), a Google monitoring tool\nwhich supports cgroups and is compatible with Prometheus.\n\nAlthough we didn't have direct metrics to determine the behavior of the Git processes, we could\ninfer it using the cgroup metrics and the Gitaly process metrics: the difference between the\ntwo would tell us the resources (CPU, memory, etc) being consumed by the Git child processes.\n\nAt our request, the production team [added cAdvisor to the Gitaly servers](https://gitlab.com/gitlab-com/infrastructure/issues/3307).\n\nHaving cAdvisor gives us the ability to know what the Gitaly service, including all its child\nprocesses, is doing.\n\n![cAdvisor graphs for the Gitaly cgroup](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-04.png){: .shadow.center}\ncAdvisor graphs of the Gitaly cgroup\n{: .note .text-center}\n\n## From bad to worse. 
Much, much worse...\n\nIn the meantime, **[the situation had got far worse](https://gitlab.com/gitlab-org/gitaly/issues/823)**.\n Instead of only seeing gradual latency increases over time, we were now seeing far more serious lockups.\n\nIndividual Gitaly server instances would grind to a halt, to the point where all new incoming TCP connections\nwere not being accepted. This proved to be a problem to using pprof: during the lockup the connection\nwould time out when attempting to profile the process. Since the reason we added pprof was to observe the\nprocess under duress, that approach was a bust.\n\nInterestingly, during a lock-up, CPU would actually decrease – the system was not overloaded, but actually\n _idled_. Iops, iowait and CPU would all drop way down.\n\nEventually, after a few minutes the service would recover and there would be a surge in backlogged\nrequests. Usually though, as soon as the state was detected, the production team would restart the\nservice manually.\n\nThe team spent a significant amount of time trying to recreate the problem locally, with little success.\n\n## Forking locks\n\nWithout pprof, we fell back to [SIGABRT thread dumps](http://pro-tips-dot-com.tumblr.com/post/47677612115/kill-a-hung-go-process-and-print-stack-traces)\nof hung processes. Using these, we determined that the process had a large amount of contention around [`syscall.ForkLock`](https://gitlab.com/gitlab-org/gitaly/issues/823#note_50951140)\nduring the lockups. 
In one dump, 1,400 goroutines were blocked waiting on `ForkLock` – most for several minutes.\n\n`syscall.ForkLock` has [the following documentation](https://github.com/golang/go/blob/release-branch.go1.8/src/syscall/exec_unix.go#L17):\n\n> Lock synchronizing creation of new file descriptors with fork.\n\nEach Gitaly server instance was `fork/exec`'ing Git processes about 20 times per second so we seemed to finally have a very promising lead.\n\n## Serendipity\n\n[Researching ForkLock](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/9365#note_54342481) led us to an issue on the Go repository,\nopened in 2013, about switching from `fork/exec` to [`clone(2)`](https://man7.org/linux/man-pages/man2/clone.2.html) with `CLONE_VFORK` and `CLONE_VM`\non systems that support it: [golang/go#5838](https://github.com/golang/go/issues/5838)\n\nThe `clone(2)` syscall with `CLONE_VFORK` and `CLONE_VM` is the same as\nthe [`posix_spawn(3)`](http://man7.org/linux/man-pages/man3/posix_spawn.3.html) c function, but the latter is easier to\nrefer to, so let's use that.\n\nWhen using `fork`, the child process will start with a copy of the parent processes' memory.\nUnfortunately this process takes longer the larger the virtual memory footprint the process has.\nEven with copy-on-write, it can take several hundred milliseconds in a memory-intensive process.\n`posix_spawn` doesn't copy the parent processes' memory space and has a roughly constant time.\n\nSome good benchmarks of `fork/exec` vs. `posix_spawn` can be found here: [https://github.com/rtomayko/posix-spawn#benchmarks](https://github.com/rtomayko/posix-spawn#benchmarks)\n\nThis seemed like a possible explanation. Over time, the virtual memory size (VMM) of the Gitaly process would increase. As VMM\nincreased, each [`fork(2)`](http://man7.org/linux/man-pages/man2/fork.2.html) syscall would take longer. 
As fork latency increased, `syscall.ForkLock` contention would increase.\nIf `fork` time exceeded the frequency of `fork` requests, the system could temporarily lock up entirely.\n\n(Interestingly, [`TCPListener.Accept`](https://golang.org/pkg/net/#TCPListener.Accept)\n[also interacts](https://github.com/golang/go/blob/2ea7d3461bb41d0ae12b56ee52d43314bcdb97f9/src/net/sock_cloexec.go#L20) with `syscall.ForkLock`,\nalthough only on older versions of Linux. Could this be the cause of our failure to connect to the pprof listener during a lockup?)\n\nBy some incredibly good luck, [golang/go#5838](https://github.com/golang/go/issues/5838), the switch from `fork` to `posix_spawn`, had,\nafter several years' delay, recently landed in Go 1.9, just in time for us. Gitaly had been compiled with Go 1.8.\n We quickly built and tested a new binary with Go 1.9 and manually deployed this\non one of our production servers.\n\n### Spectacular results\n\nHere's the CPU usage of Gitaly processes across the fleet:\n\n![CPU after Go 1.9](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-05.png){: .shadow.center}\nCPU after recompiling with Go 1.9\n{: .note .text-center}\n\nHere's the 99th percentile latency figures. This chart is using a logarithmic scale, so we're talking about two orders of\nmagnitude faster!\n\n![30x latency drops with Go 1.9](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-06.png){: .shadow.center}\nEndpoint latency after recompiling with Go 1.9 (log scale)\n{: .note .text-center}\n\n## Conclusion\n\nRecompiling with Go 1.9 solved the problem, thanks to the switch to `posix_spawn`. We learned several other lessons\nin the process too:\n\n1. Having solid application monitoring in place allowed us to detect this issue, and start investigating it, far\n   earlier than we otherwise would have been able to.\n1. 
[pprof](https://blog.golang.org/profiling-go-programs) can be really helpful, but may not help when a process\n   has locked up and won't accept new connections. pprof is lightweight enough that you should consider adding it to your application _before_ you need it.\n1. When all else fails, [`SIGABRT thread dumps`](http://pro-tips-dot-com.tumblr.com/post/47677612115/kill-a-hung-go-process-and-print-stack-traces) might help.\n1. [`cAdvisor`](https://github.com/google/cadvisor) is great for monitoring cgroups. Systemd services each run in\n   their own cgroup, so `cAdvisor` is an easy way of monitoring a service and all its child processes, together.\n\n[Photo](https://unsplash.com/photos/jJbQBP_yh68?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Javier García on [Unsplash](https://unsplash.com/search/photos/slow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1295],{"slug":3407,"featured":6,"template":680},"how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x","content:en-us:blog:how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x.yml","How A Fix In Go 19 Sped Up Our Gitaly Service By 30x","en-us/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x.yml","en-us/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x",{"_path":3413,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3414,"content":3420,"config":3426,"_id":3428,"_type":14,"title":3429,"_source":16,"_file":3430,"_stem":3431,"_extension":19},"/en-us/blog/how-a-remote-internship-at-gitlab-shaped-my-career",{"title":3415,"description":3416,"ogTitle":3415,"ogDescription":3416,"noIndex":6,"ogImage":3417,"ogUrl":3418,"ogSiteName":667,"ogType":668,"canonicalUrls":3418,"schema":3419},"My experience as a recruiting intern at GitLab","Why interning for an asynchronous and all-remote company is the best way to 
go.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673044/Blog/Hero%20Images/books-internship-post.jpg","https://about.gitlab.com/blog/how-a-remote-internship-at-gitlab-shaped-my-career","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"My experience as a recruiting intern at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Trevor Knudsen\"}],\n        \"datePublished\": \"2019-12-06\",\n      }",{"title":3415,"description":3416,"authors":3421,"heroImage":3417,"date":3423,"body":3424,"category":808,"tags":3425},[3422],"Trevor Knudsen","2019-12-06","\n\n[Applications for GitLab's engineering internship program are open! Apply now](/handbook/engineering/internships/).\n{: .alert .alert-gitlab-purple .text-center}\n\nWhile a remote internship may seem like a foreign idea with many limitations to some, in reality, the options can be far less limiting than office work. Working remotely comes with many perks, including work-life balance, ability to travel, and the flexibility to work wherever you please, but the benefits go beyond that. Taking an internship away from the office offers learning experiences. You have the opportunity to work with people outside your city – and instead collaborate with people from all around the world. Flexibility, learning opportunities, mentors, and work experience are all so accessible with a remote internship.\n\n## Why I joined GitLab\n\nAs a communications major at California State University, Fullerton, I was required to find an internship. While I was looking for an internship, the [all-remote](/blog/the-remote-manifesto/) setup of GitLab immediately caught my attention, and there was an opportunity as a recruiting intern that I could not pass up. I applied and after going through the interview process I was offered the position. 
🎉\n\n### Globally distributed team\n\nI started with GitLab in October 2017, which was my senior year of college. My first day with GitLab was such a rush. I met with my mentor, manager, and the team, and went through onboarding. I was welcomed as if I was a full-time employee by my team, and I quickly realized my entire team was my mentor. I had coworkers in the United Kingdom, South Africa, and all across the United States. While every team member was helpful, one of my greatest mentors was (and continues to be) [Nadia Vatalidis](/company/team/#vatalidis). She worked as a recruiter also and checked in with me on a regular basis to make sure I felt comfortable using the GitLab tool and see what tasks I was working on. We also collaborated on different projects the recruiting team was working on.\n\n### Our values\n\nGitLab is guided by its values, and each day I saw these [values](https://handbook.gitlab.com/handbook/values/) used in every aspect of our work. The [diversity](https://handbook.gitlab.com/handbook/values/#diversity-inclusion) of the recruiting team was a strength, bringing creative solutions to the table each day. The entire company [collaborated](https://handbook.gitlab.com/handbook/values/#collaboration) on projects and shared ideas, while always respecting each other's thoughts and opinions. One of the great things about working with GitLab was that if an idea was presented, it could be implemented after a bit of discussion even if not yet refined. This ensured that we operated with [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency) and [transparency](https://handbook.gitlab.com/handbook/values/#transparency) values. Our team would push forward initiatives and ideas and [iterate](https://handbook.gitlab.com/handbook/values/#iteration) on them as they were implemented.\n\n### All-remote and asynchronous workflows\n\nThe wonderful thing about GitLab is I was able to work when I wanted. 
When I had midterms coming up, I was able to take a few days off to study. Vacation was never a hindrance, I simply took the days off. GitLab has a [no ask, must tell](/handbook/paid-time-off/) PTO policy, meaning as long as I shared my plans with manager and team, I could take the time off. Working remotely also allowed me to work from anywhere. When I took a trip to Zion National Park in Utah with friends, I was able to adjust my working hours so I could explore by day and work in the evenings. On a snowy day in Zion, I sat on the back patio next to a warm fire, watching the beauty of the snowfall. It was this experience that helped me recognize the true potential of all-remote. The best part about the flexibility is even when I adjusted my work hours, I was never truly alone. Team members in Europe, the Middle East, and even in Africa were online when the team in the Americas has already logged out. Someone was always online and available for support.\n\n## Not your average internship\n\nMy experience as a GitLab intern was not typical, because it was a true work experience. I got the pleasure of working alongside the team on major projects, such as looking into a new application tracking system. I got to be involved in screening calls, scheduling interviews for candidates, and helped implement a better solution on how to maintain company assets. My internship helped me learn and grow the skills necessary to be part of the recruiting team, and ultimately landed me a full-time position at GitLab just six months into my internship.\n\nI learned so much as a GitLab team member, and met so many people who continue to be a mentor to me today. An all-remote internship was also ideal for me as a student, because I was able to have a solid work-life balance – something I continue to enjoy today.\n\nIf you're a student or career-changer searching for an internship, be sure to not undercut remote work opportunities. 
Check out GitLab's [current internship opportunites](/handbook/engineering/internships/). You can really learn so much as part of a fully distributed team.\n\n_Read more about [making remote internships successful](/blog/making-remote-internships-successful/)._\n\nCover image by Patrick Tomasso on [Unsplash](https://unsplash.com)\n{: .note}\n",[811,9,832],{"slug":3427,"featured":6,"template":680},"how-a-remote-internship-at-gitlab-shaped-my-career","content:en-us:blog:how-a-remote-internship-at-gitlab-shaped-my-career.yml","How A Remote Internship At Gitlab Shaped My Career","en-us/blog/how-a-remote-internship-at-gitlab-shaped-my-career.yml","en-us/blog/how-a-remote-internship-at-gitlab-shaped-my-career",{"_path":3433,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3434,"content":3440,"config":3444,"_id":3446,"_type":14,"title":3447,"_source":16,"_file":3448,"_stem":3449,"_extension":19},"/en-us/blog/how-all-remote-supports-inclusion-and-bolsters-communities",{"title":3435,"description":3436,"ogTitle":3435,"ogDescription":3436,"noIndex":6,"ogImage":3437,"ogUrl":3438,"ogSiteName":667,"ogType":668,"canonicalUrls":3438,"schema":3439},"How all-remote supports inclusion and bolsters communities","When your hiring pipeline is more inclusive, your team becomes more inclusive.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679679/Blog/Hero%20Images/kuala-lumpur-dm.jpg","https://about.gitlab.com/blog/how-all-remote-supports-inclusion-and-bolsters-communities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How all-remote supports inclusion and bolsters communities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Murph\"}],\n        \"datePublished\": \"2019-12-06\",\n      }",{"title":3435,"description":3436,"authors":3441,"heroImage":3437,"date":3423,"body":3442,"category":808,"tags":3443},[890],"\n\nA diverse and 
[inclusive](/company/culture/inclusion/#fully-distributed-and-completely-connected) team is a stronger, [smarter](https://hbr.org/2016/11/why-diverse-teams-are-smarter), and more empathetic team. As industries grapple with mechanisms to encourage and facilitate inclusivity, all-remote teams — where [100% of team members](/company/culture/all-remote/stages/) are empowered to work from anywhere — are more inclusive by default.\n\n## All-remote exudes inclusiveness\n\n![GitLab colleagues gathered in London](https://about.gitlab.com/images/blogimages/gitlab-commit-london-2019-colleagues.jpg){: .shadow.medium.center}\nGitLab colleagues gathered in London.\n{: .note.text-center}\n\nResearch from [Deloitte](https://www2.deloitte.com/us/en/insights/deloitte-review/issue-22/diversity-and-inclusion-at-work-eight-powerful-truths.html) shows that \"teams with inclusive leaders are 17% more likely to report that they are high performing, 20% more likely to say they make high-quality decisions, and 29% more likely to report behaving collaboratively.\"\n\nGitLab recognizes that one [advantage](/company/culture/all-remote/benefits/) to being an all-remote company is that we can [hire talent from a global pool](/handbook/hiring/). We are not restricted to the usual job centers, which gives us access to a tremendous amount of talent that many other companies will not consider for employment. It may take more effort to find talent in more diverse places, but that is an effort we are willing to make.\n\nCurrently, GitLab employs over [1,000 team members across more than 60 countries](/company/team/). This level of richness in cultural and geographic diversity is enabled by all-remote, and naturally shields against biases that form when entire teams live, work, and interact in the same region of the world.\n\nWe're surrounded by a tapestry of unique cultures, celebrations, and traditions. 
Not only does this give us a broader view of the world internally, it enables us to be more empathetic to the broader open-source community.\n\n## Sourcing talent from around the globe\n\n![All-remote allows people to thrive wherever they call home. Image by [Darren Murph](https://twitter.com/darrenmurph)](https://about.gitlab.com/images/blogimages/night-train-city-sweden.jpg){: .shadow.medium.center}\nAll-remote allows people to thrive wherever they call home. Image by [Darren Murph](https://twitter.com/darrenmurph)\n{: .note.text-center}\n\nGitLab's six [values](https://handbook.gitlab.com/handbook/values/) are Collaboration, Results, Efficiency, Diversity, Inclusion & Belonging , Iteration, and Transparency, and together they spell CREDIT.\n\nTrue to those values, GitLab strives to hire team members who are passionate, empathetic, kind, tenacious, and ambitious, *regardless* of their location. By opening the recruiting funnel to as [broad a swath of the world as we can](/handbook/people-group/employment-solutions/#country-hiring-guidelines), we create a more inclusive hiring environment, lean on tight collaboration to drive progress [across time zones](/company/culture/all-remote/management/#asynchronous), and focus our hiring decisions on results rather than location.\n\nHiring an all-remote team from across the globe allows GitLab to [pay local rates](/blog/why-we-pay-local-rates/). 
By hiring brilliant minds in locations with lower costs of living, GitLab is able to save money to hire even more people as we [scale our business](/company/culture/all-remote/scaling/).\n\n- All-remote means that you [will not sacrifice career advancement](/handbook/people-group/learning-and-development/) by working outside of the office, as even GitLab executives are fully remote.\n- All-remote creates a workplace where caregivers, individuals with physical disabilities, etc., are not disadvantaged by being unable to regularly commute into an office.\n- GitLab's approach to [spending company money](/handbook/spending-company-money/) enables all team members to create a work environment uniquely tailored for them.\n- All-remote enables those who must relocate frequently for family and personal reasons to take their career with them.\n- All-remote allows movement and relocation to physical settings that contribute to an individual's health (e.g., moving to a location with an improved air quality index).\n\n## Bolstering communities\n\n![When people aren't forced to relocate for work, their communities benefit. Image by [Darren Murph](https://twitter.com/darrenmurph)](https://about.gitlab.com/images/blogimages/community-outdoors-eu.jpg){: .shadow.medium.center}\nWhen people aren't forced to relocate for work, their communities benefit. Image by [Darren Murph](https://twitter.com/darrenmurph)\n{: .note.text-center}\n\nAll-remote encourages team members to work and live in a place where they are most fulfilled. This enables our team to reside in regions or communities that provides far more than shelter, but enriches their life experience by enabling long-lasting relationships with people who shape and support them.\n\nBy not forcing people to relocate for work, companies which embrace all-remote are benefitting local comunities in a significant way. 
Rural communities receive [outsized economic benefit](https://www.lajuntatribunedemocrat.com/news/20190808/state-remote-work-program-could-help-rural-communities), while major metropolitan areas experience less strain on infrastructure.\n\nStay-at-home [parents](/company/culture/all-remote/people/#parents) who wish to further their career, [caregivers](/company/culture/all-remote/people/#caretakers), [military spouses](/company/culture/all-remote/people/#military-spouses-and-families), and those who struggle with mobility can all contribute meaningfully when a company removes the location requirement from the job description.\n\nAll-remote opens the hiring door to places far beyond the usual job centers of the world. Candidates are not limited by geography and [we champion this approach](/blog/all-remote-is-for-everyone/) – to the extent that it’s possible – for all companies.\n\n{: .note}\n\n",[677,9,832],{"slug":3445,"featured":6,"template":680},"how-all-remote-supports-inclusion-and-bolsters-communities","content:en-us:blog:how-all-remote-supports-inclusion-and-bolsters-communities.yml","How All Remote Supports Inclusion And Bolsters Communities","en-us/blog/how-all-remote-supports-inclusion-and-bolsters-communities.yml","en-us/blog/how-all-remote-supports-inclusion-and-bolsters-communities",{"_path":3451,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3452,"content":3458,"config":3463,"_id":3465,"_type":14,"title":3466,"_source":16,"_file":3467,"_stem":3468,"_extension":19},"/en-us/blog/how-being-public-by-default-in-security-builds-trust",{"title":3453,"description":3454,"ogTitle":3453,"ogDescription":3454,"noIndex":6,"ogImage":3455,"ogUrl":3456,"ogSiteName":667,"ogType":668,"canonicalUrls":3456,"schema":3457},"How being public by default in security builds trust","The rewards of being open in security still outweigh the 
challenges.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670904/Blog/Hero%20Images/corded-devices.jpg","https://about.gitlab.com/blog/how-being-public-by-default-in-security-builds-trust","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How being public by default in security builds trust\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2020-09-03\",\n      }",{"title":3453,"description":3454,"authors":3459,"heroImage":3455,"date":3460,"body":3461,"category":698,"tags":3462},[1010],"2020-09-03","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nWe sat down with GitLab sr. security researcher Mark Loveless to talk about his role, how he sees the tech industry changing and the freeing feeling that working public by default (even in Security) brings and the trust that it builds.\n\n![Mark Loveless Headshot](https://about.gitlab.com/images/blogimages/mlovelessbw.png){: .small.right.wrap-text} **Name:** Mark Loveless\n\n**Title:** Sr. Security Researcher\n\n**How long have you been at GitLab?**: I joined February 2019\n\n**GitLab handle:** [@mloveless](https://gitlab.com/mloveless)\n\n**Connect with Mark:** [LinkedIn](https://www.linkedin.com/in/markloveless) / [Twitter](https://twitter.com/simplenomad)\n\n\n\n#### Tell us what you do here at GitLab:\nI perform research on security-related issues to help protect GitLab team members as well as GitLab customers. This can involve researching a new product feature, evaluating a SaaS product that GitLab is using or considering using, or educating others via presentations and blog posts.\n\n#### What’s the most challenging or rewarding aspect of your role? \nSecurity should be painless and just a natural part of someone going about their day. 
If a process is implemented that makes things more secure and it causes no friction to the point that most people do not even notice it, then I’ve done a good job.\n\n#### And, what are the top 2-3 initiatives you’re currently focused on? \nIn my role, I’m focused on:\n* Outreach via blogs and security conferences. Here’s a sample blog that has links to several other posts I wrote about GitLab’s Zero Trust journey, [“We answer your most popular questions about our Zero Trust journey“](/blog/questions-regarding-our-zero-trust-efforts/)\n\n* Securing the product. This blog post, [“GitLab instance: security best practices“](/blog/gitlab-instance-security-best-practices/) was one that many in the security department helped me with and was written to help our customers harden their instances.\n\n* Occasional mouthpiece to the press on GitLab and industry security practices; again part of that outreach effort. An example: [\"Remote Work Has a Hidden Challenge: Data Security\"](https://www.inc.com/cameron-albert-deitch/remote-work-data-security.html).\n\n#### What is the most significant piece of security advice you could provide to a colleague or friend? \nEven though it is boring, do not forget the basics! This includes patching, unique passwords, and always using two-factor authentication. The press is full of stories of exotic attacks and flamboyant new bugs, but the basics eliminate the vast majority of threats.\n\n#### How did you get into security? \nIt all started as exploring, discovering that one could get into systems one was not supposed to be in. I loved it. As I got better at what I was doing, I also improved in the tech field in general, since I had to learn what system admins would do so I could avoid getting caught and being kicked out of some server. 
Eventually I got jobs in the tech field, and as I progressed I had a knack for the security aspects, and it went from there.\n\n#### From the perspective of your role, what’s GitLab doing better than anyone else in terms of security? \nOpenness. When I first started it seemed horrifying that all of the code and the handbook were so open, but in actuality it is quite freeing. We’re “default open” ([public by default](https://handbook.gitlab.com/handbook/values/#public-by-default)). Now this applies to the entire company and not just the security department, but it is nice that we don’t have to worry about security decisions becoming public; they will be regardless. This keeps us honest, and when someone is honest you’re more likely to trust them. This strengthens our security posture in that when we claim to be secure it can be verified, and as issues are identified (by team members or the GitLab community) we can fix them as openly as possible.\n\n#### What was your personal worst moment in the Infosec world and how did you recover? \nI have been let go from my job twice, both times after a buy-out. In one case the buying company had a policy against hiring hackers, but I expected it and I had another job lined up through a friend in the industry. In the other instance, my job went away and I did not want to transition to another department since it would involve moving. I took my buy-out money and decided to take some time off, or “funemployment”. A friend of mine named Kathy Wang - an early leader here at GitLab who helped grow the security department - saw my blog post about my time off and reached out, mentioning GitLab. So the important lesson here is that maintaining friendships in the security industry can really help in times of need. And you never know when you’ll be in a “time of need.”\n\n#### Name your favorite accomplishment that you are totally not known for. \nI was at the very first few Black Hat Briefings in Las Vegas. 
While a researcher at a tech firm that was a sponsor at one of those early Black Hat events, we had a brainstorming session where I came up with the idea that we should have an open bar event. Free of charge, no sales pitch stuff, just drinking and networking. It is the norm now, or at least it was pre-pandemic when conferences were in person. YOU’RE WELCOME.\n\n#### Play nostradamus for a minute.  Tell us how you see the tech or security landscape changing in the next 5 years?\nI believed that the tech industry itself would continue the move to all-remote or at least remote first, but the COVID-19 pandemic has accelerated that quite a bit. As a result I think the principles of both Zero Trust as well as BYOD (Bring Your Own Device) will become more of the norm as the tech landscape will be nearly all remote. Any company that is cloud-based with an Internet presence can do this, so many non-technical industries (marketing agencies, consulting firms, and so on) will move in this direction as well. I also believe that a passwordless world is possible, as two factor can consist of factors besides a password like biometrics and a U2F device (e.g. Yubikey), and that within five years this will start to truly become a real thing with actual industry acceptance. I’d love to see that happen, the password is simply one of the biggest failures and worse engineering designs ever.\n\n## Now, for the questions you *really* want to have answered:\n\n\n\n#### What’s your most interesting experience while traveling? \nI was stopped by TSA and I tested positive for TNT. Here’s [the whole story](https://www.markloveless.net/blog/2019/2/22/p24ekffvg7zyv4usvt1xshev5h1o8z).\n\n#### When traveling, packing cubes or no packing cubes? \nPacking cubes. My packing ritual is minimalistic. Everything is wear a pair, pack a pair, and I do sink laundry every night (I bring my own soap for this). 
The idea is that I have all of the tech, clothing, and accessories to last on a three week trip with a single backpack. A bad storm and a packed airport can turn an overnight trip into a week-long ordeal, and I am prepared. This requires an insane level of discipline and planning, and packing cubes are essential to making this process easier.\n\n#### When you’re not working, what do you enjoy doing/how do you spend your free time? \nIt’s a toss-up between playing and recording progressive metal music and working in the woodshop. Both are fun and I’ve done them for years.\n\n#### If you were stranded on an island, what three things would you bring? \nA water purification kit or Berkey water filtration system, a fully charged GPS, and a fully charged satellite phone. I’d immediately call for help with my exact coordinates, and sip on freshly-filtered water until help arrives.\n\n\nPhoto by [Thomas Jensen](https://unsplash.com/@thomasjsn) from [Unsplash](https://www.unsplash.com).\n{: .note}\n",[720,9],{"slug":3464,"featured":6,"template":680},"how-being-public-by-default-in-security-builds-trust","content:en-us:blog:how-being-public-by-default-in-security-builds-trust.yml","How Being Public By Default In Security Builds Trust","en-us/blog/how-being-public-by-default-in-security-builds-trust.yml","en-us/blog/how-being-public-by-default-in-security-builds-trust",{"_path":3470,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3471,"content":3477,"config":3482,"_id":3484,"_type":14,"title":3485,"_source":16,"_file":3486,"_stem":3487,"_extension":19},"/en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team",{"title":3472,"description":3473,"ogTitle":3472,"ogDescription":3473,"noIndex":6,"ogImage":3474,"ogUrl":3475,"ogSiteName":667,"ogType":668,"canonicalUrls":3475,"schema":3476},"How do we handle engineering-led issues that don't belong to one team?","A recent issue sparked a lively discussion between engineering and product leadership 
about how 'cross-vertical' issues should be prioritized to avoid the bystander effect.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678916/Blog/Hero%20Images/how-do-we-handle-engineering-led-initiatives.jpg","https://about.gitlab.com/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How do we handle engineering-led issues that don't belong to one team?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2018-10-30\",\n      }",{"title":3472,"description":3473,"authors":3478,"heroImage":3474,"date":3479,"body":3480,"category":299,"tags":3481},[784],"2018-10-30","\nThe GitLab engineering team is split according to [product category](/handbook/product/categories/), so that team members in each category can [focus, specialize, and collaborate](/blog/configure-post/) on the same issues at the same time. They are semi-siloed by design, so what happens to issues, like tech debt, that are everyone and no one’s responsibility?\n\nThe short answer is, teams are still figuring it out. A recent [issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/52150) sparked a lively discussion and video call, which you can watch below. 
Listen in below on the discussion between engineering and product leadership about how technical debt or other engineering initiatives that are \"cross-vertical\" (that is, touch on many different product areas) should be prioritized given that there isn't one clear point of contact or responsibility for those issues.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/3ZEI4W_Cb2g\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### The gist\n\nThe issue that started it all had to do with a task that would have been assigned to the former Platform team, which used to be a catch-all that has since been split up into Create and Manage. Engineering Manager, Create [Douwe Maan](/company/team/#DouweM) explains, “With all backend teams now focused on specific product areas... there is no team to take on these kinds of backend-wide, non-product-area specific issues anymore.”\n\nHe continues, “Issues like this affect all backend teams equally, so we fall prey to the bystander effect. When an engineering manager gets to make room in a given release for an engineering-led initiative, they have the choice between issues like this, that any team could pick up, and product area-specific issues, that aren't going to get done unless their team does it, so the latter will have a far higher chance of being picked. Everyone cares about these kinds of issues, which means no one cares... 
there are many issues (technical debt and otherwise) that aren't currently anyone's responsibility, so they won't get done.”\n\nThis felt like a recurring problem due to other recent examples of cross-vertical initiatives stalling, like this issue to [switch to Rails 5 in production](https://gitlab.com/gitlab-org/gitlab-ce/issues/48991), and this issue to [update GitLab's referrer policy](https://gitlab.com/gitlab-org/gitlab-ce/issues/39147).\n\n### The research\n\nWe've heard from our community that this is a common problem, especially when working with others in different functions. In [recent interviews](https://drive.google.com/file/d/1A5mSNoPJydjcWKE4rdO2287sjnABxGDA/view) with 15 DevOps engineers, many expressed their frustration at the amount of reactive work and rework that they face, and identified a lack of successful coordination and empathy between different teams as the culprit. One interviewee said he thought this is inherent to working with some functions. Because of how release schedules work for developers and security engineers, he thinks these groups are the least likely to feel they are able to assign cycles to some proactive tasks, like fixing technical debt before it's critical.\n\nThe nearly 20 [software engineers](https://drive.google.com/file/d/1EVrjVcgIBbuNf4Gwenajsiy6Wv9HsTJw/view) we [interviewed](https://drive.google.com/file/d/15GksPiH0xmy4nRhylhMDIWmuvdHMWof4/view) also brought up their frustration at the way that technical debt can transform a seemingly simple task into a massive effort requiring them to rewrite or refactor a large chunk of code. More than the time spent on these tasks, several developers mentioned their concern that others might see them as dragging their feet and becoming a blocker when they take the time to resolve the technical debt. After all, it was just \"a simple task.\"\n\nThe responsibility to fix these issues becomes even more muddied when no particular team owns them. 
One [study of 95 teams in 25 leading corporations found that the majority of cross-functional teams are dysfunctional](https://hbr.org/2015/06/75-of-cross-functional-teams-are-dysfunctional), in large part because siloes self-perpetuate. The authors argue the solution is to create a “Portfolio Governance Team (PGT), where high-level leaders make complex decisions on the various projects in their portfolio together.\" The number one rule for making a PGT successful? \"Every project should have an end-to-end accountable leader.\"\n\n### The fix\n\nAlong these lines, one long-term solution being discussed at GitLab is establishing a dedicated team that will transcend the product areas and be responsible for these murky in-between issues. But Director of Engineering, Dev Backend [Tommy Morgan](/company/team/#itstommymorgan) adds, “Even if we had a team that was in place to handle issues like this one, there will always be boundary conditions. As Product is responsible for prioritizing work, if we need to do any horse-trading or other determination to figure out where the work should land, I think that's something that Product should work out.”\n\nShort of creating a new team, Product Managers and Engineering Managers will need to frankly discuss their own priorities and incentives in order to get these tasks scheduled.\n\nWhat has your org tried? Is it working? 
Tweet us [@gitlab](https://twitter.com/gitlab).\n\n[Photo](https://unsplash.com/photos/fIq0tET6llw) by [Diego PH](https://unsplash.com/@jdiegoph) on Unsplash.\n{: .note}\n",[811,1440,9,723],{"slug":3483,"featured":6,"template":680},"how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team","content:en-us:blog:how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team.yml","How Do We Handle Engineering Led Initiatives That Dont Belong To One Team","en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team.yml","en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team",{"_path":3489,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3490,"content":3496,"config":3501,"_id":3503,"_type":14,"title":3504,"_source":16,"_file":3505,"_stem":3506,"_extension":19},"/en-us/blog/how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization",{"title":3491,"description":3492,"ogTitle":3491,"ogDescription":3492,"noIndex":6,"ogImage":3493,"ogUrl":3494,"ogSiteName":667,"ogType":668,"canonicalUrls":3494,"schema":3495},"How the GitLab iteration value drives innovation through the engineering","GitLab is a unique place to be a developer. 
Here's why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668224/Blog/Hero%20Images/inside-our-new-development-team-lead-persona.jpg","https://about.gitlab.com/blog/how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the GitLab iteration value drives innovation through the engineering\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-06-10\",\n      }",{"title":3491,"description":3492,"authors":3497,"heroImage":3493,"date":3498,"body":3499,"category":808,"tags":3500},[950],"2022-06-10","GitLab is focused on helping developers iterate faster and innovate more collaboratively – and that focus on enabling iteration extends to our own developer culture.\n\nAs an organization, our [CREDIT values](https://handbook.gitlab.com/handbook/values/) are hardwired into our operations and culture. This empowers our development teams to work together – using our own product – to offer QA, feedback, and strategies that make everyone’s work stronger and help our organization iterate faster. \n\nWe asked several engineers and engineering leaders at GitLab to tell us, in their own words, how our values come to life in our engineering organization and how that makes GitLab a unique place to be a developer.\n\n## What attracts engineers to GitLab\n\nTo start, we wanted to understand what attracted some of our current engineers and engineering leaders to join GitLab.\n\n**You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.**\n\n“I was attracted to GitLab because I knew that I had the ability to make an impact. 
Being remote has shattered the walls between people and teams, so anybody can approach anybody. If something means something to you, you can really work on it. This culture of transparency and collaboration is really important to me.” - [Sri Rangan](/company/team/#sri19), Fullstack Engineer, Incubation Engineering Team\n\n“People are attracted to the global diversity of the team and working asynchronously. I think we have a special working culture at GitLab. When you join, whether you're the manager of multiple people or a manager of yourself, you work asynchronously regardless of where your teams are.” - [Mek Stritti](/company/team/#meks), VP, Quality\n\n“Before coming to GitLab, I was a frontend, backend, Android developer, data scientist, and machine learning engineer, among other things. But the thing about how I work is that I like to switch between those roles. And normally in companies, you can't grow across all those roles. You need to grow as a specialist, not a generalist. But within the Incubation Engineering team, I get to do that.” - [Eduardo Bonet](/company/team/#eduardobonet), Fullstack Engineer, Incubation Engineering Team\n\n“The feedback that I quite often hear from engineers is just how strong the team is around them, and how collaborative the rest of the organization is. For my team in particular, a big part of their success is to be able to collaborate effectively with both the people that they work with and other teams. A lot of candidates are attracted to GitLab by the transparency value. Transparency is something that we really try to encourage, and it becomes a big mindset.” - [Bartek Marnane](/company/team/#bmarnane), VP, Incubation Engineering\n\n## How we ensure collaboration across the organization \n\nBeyond the aspects of GitLab that attracted many of our current engineers, it was clear that the culture they experienced during their time here ensured there was collaboration across various teams within our engineering organization. 
\n\n\"We have an organization that supports each other. You propose a feature, you're building something, and you can collaborate very easily across the globe, across departments with people in infrastructure and security. So when you're building something it's not all on you to ensure its stability and reliability and safety – the entire organization takes ownership of that.” - [Darby Frey](/company/team/#darbyfrey), Fullstack Engineer, Incubation Engineering\n\n“We have a strong culture of collaboration, people reach out and say, “Hey, I'm looking for someone to dogfood this,” and we're always willing to pick those up. Our team has a goal to dogfood a new feature every milestone.” - [Kyle Wiebers](/company/team/#kwiebers), Manager, Engineering, Quality Team\n\n## Why we believe in iteration (and building boring solutions when they work)\n\nOur engineering teams are always thinking about how to best deliver value and receive feedback along the way. It turns out that iteration and building boring solutions that can be delivered quickly is a great way to deliver value and receive feedback. For example, our [Incubation team](/handbook/engineering/development/incubation/) is working to move away from the natural instinct to develop a prototype, get it working, then putting it into the product.\n\n“We’re asking,'how can we look at what you are planning on doing, and then divide that into milestones where every one of those milestones can be integrated into the product?' So we get value out of it and get feedback out of it as well.” - Bartek \n\nAcross other parts of GitLab’s engineering organization, the same type of approach is being embraced.\n\n“For my team, what we try to do is identify a big problem, and then identify lots of small solutions towards that problem. The embrace of efficiency and iteration really aligns with the team that I'm on.” - Kyle\n\n“We want to ship new features quickly so we can get feedback. 
That first version isn’t going to be perfect, but we're okay with that. We all agree that it's better to get feedback than to spend six months polishing every pixel on a feature that maybe no one wants, and then having to throw it out.” - Darby\n\nWhether it’s our Incubation Engineering team or Quality in Engineering team, embracing iteration and collaboration as a way to achieve results has become the standard approach. \n\nLearn more about how you can contribute to a culture of empathy and productivity by launching or progressing your career at GitLab by checking out our [careers page](/jobs/).\n",[1440,1440,9],{"slug":3502,"featured":6,"template":680},"how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization","content:en-us:blog:how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization.yml","How Gitlab Iteration Value Drives Innovation Through The Engineering Organization","en-us/blog/how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization.yml","en-us/blog/how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization",{"_path":3508,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3509,"content":3515,"config":3521,"_id":3523,"_type":14,"title":3524,"_source":16,"_file":3525,"_stem":3526,"_extension":19},"/en-us/blog/how-gitlab-measures-red-team-impact-the-adoption-rate-metric",{"title":3510,"description":3511,"ogTitle":3510,"ogDescription":3511,"noIndex":6,"ogImage":3512,"ogUrl":3513,"ogSiteName":667,"ogType":668,"canonicalUrls":3513,"schema":3514},"How GitLab measures Red Team impact: The adoption rate metric","Follow our journey to develop and implement better metrics, including how we used GitLab to track our results end-to-end. 
Also find out the lessons learned along the way.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663239/Blog/Hero%20Images/AdobeStock_1023776629.jpg","https://about.gitlab.com/blog/how-gitlab-measures-red-team-impact-the-adoption-rate-metric","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab measures Red Team impact: The adoption rate metric\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Moberly\"}],\n        \"datePublished\": \"2025-03-05\"\n      }",{"title":3510,"description":3511,"authors":3516,"heroImage":3512,"date":3518,"body":3519,"category":720,"tags":3520},[3517],"Chris Moberly","2025-03-05","In early 2024, we started a journey to implement better metrics for [our internal Red Team](https://handbook.gitlab.com/handbook/security/security-operations/red-team/). Our first iteration focused on what we now call the adoption rate metric, which measures how often the recommendations our team makes are accepted and implemented.\n\nChoosing this metric was very deliberate. While there are many ways to measure a Red Team's impact, we wanted to start with something fundamental: Are we actually driving meaningful security improvements? The adoption rate directly ties our work to real security outcomes, and we could measure it using tools and processes we already had in place.\n\nIn this article, you'll discover how we used GitLab to track these results end-to-end, some lessons we learned (including what we would have done differently), and our plans to tackle the next set of metrics.\n\n## How we implemented the adoption rate metric\n\nWe use GitLab extensively for our Red Team planning, execution, and reporting. Every operation wraps up with a report that's written in markdown in a dedicated GitLab project. 
Each report contains a section called \"Recommendations\" with a list of suggestions to make GitLab more secure.\n\nThose recommendations are always linked to a dedicated issue, which we open in the project closest to the team who can address it. If we're suggesting a product feature, it goes directly in that tracker. If it's a detection capability, it goes into the detections as code repository. We always assign a directly responsible individual (DRI) in the group that owns that space, and we use [this issue template](https://gitlab.com/gitlab-com/gl-security/security-operations/redteam/redteam-public/resources/red-team-issue-templates/-/blob/main/.gitlab/issue_templates/recommendation.md?ref_type=heads) to ensure consistency in describing the problem, the risk, and potential solutions.\n\n![Red team - recommendation-example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674984/Blog/Content%20Images/recommendation-example__1_.png)\n\nHere's where the tracking logistics come in. 
We use GitLab labels to classify the recommendation across three categories:\n\n- Detections and alerts (`RTRec::Detection`)  \n- Security controls (`RTRec::Control`)  \n- Processes and procedures (`RTRec::Process`)\n\nWe then use another set of labels to follow the lifecycle of that recommendation – from review all the way through adoption:\n\n- Under review (`RecOutcome::UnderReview`)  \n- Accepted and actively being worked on (`RecOutcome::InProgress`)  \n- Accepted but backlogged (`RecOutcome::Backlogged`)  \n- Accepted but blocked (`RecOutcome::Blocked`)  \n- Fully adopted and closed (`RecOutcome::Adopted`)  \n- Partially adopted and closed (`RecOutcome::PartiallyAdopted`)  \n- Not adopted and closed (`RecOutcome::NotAdopted`)\n\n## How we stay on top of recommendations\n\nWe use a new GitLab feature called [\"GitLab Query Language\" (GLQL)](https://docs.gitlab.com/ee/user/glql/) to build a dynamic Security Recommendations Dashboard inside a GitLab issue.\n\nThis issue allows us to quickly identify things like:\n\n- open recommendations that haven't been updated recently  \n- open recommendations that have been backlogged for an extended period of time  \n- closed recommendations that weren't properly labeled with an adoption outcome\n\nWe've found this process encourages the Red Team to follow up on stale recommendations, reaching out to the owners and seeing how we can help get them adopted.\n\nGLQL is very cool, and allows us to turn a short code block like this:\n\n```yaml  \n---  \ndisplay: table  \nfields: title, labels(\"RTRec::*\"), labels(\"RecOutcome::*\"), created, updated  \n---  \ngroup = \"gitlab-com\"  \nAND label = \"RTRec::*\"  \nAND opened = true  \n```\n\n... into a dynamic table like this:\n\n![Red Team - GLQL table](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674984/Blog/Content%20Images/glql-table.png)\n\nThat table for us is very tactical and we use it to keep things moving. 
Beyond that, we also visualize the adoption rate trends over time. That allows us to look at things like quarterly adoption rate percentages, how long different types of recommendations take to adopt and implement, and how these figures vary across departments.\n\n## Lessons learned\n\n**1. Start with metrics in place; don't wait for your program to mature first.**\n\nEarly in our Red Team's development, we focused more on how we would execute operations and less on how we would measure them. The idea of using metrics to distill complex operations into simple numbers felt like it might oversimplify our work. But we've learned that thoughtful metrics don't reduce the value of Red Team operations - they help demonstrate our impact and guide our program's growth. Starting with clear metrics earlier would have accelerated this growth.\n\nImplementing these metrics later meant spending significant time reformatting years of historical recommendations to enable consistent analysis. Had we planned for metrics from the start, we could have saved ourselves a lot of time and effort.\n\nWe’re keeping this lesson in mind as we start on our next set of metrics, threat resilience, which we talk about below.\n\n**2. Don't operate in a silo.**\n\nRed Teams aren't the only groups that provide recommendations in a security organization. At GitLab, we have our bug bounty program, our external pentests, product security, security assurance, and security operations.\n\nOn the Red Team, we developed our own recommendations process from scratch. It's been fairly effective, but we have noticed some areas for improvement, particularly around prioritization, project management, and alignment with our organization's risk reporting process.\n\nWe also noticed that some other teams are really good at these areas such as our bug bounty program and the triaging of findings from our external pentests. 
Those particular groups are very good at delivering product recommendations, and we've been learning from their approach to improve our own delivery methods.\n\nSo we've taken our success with visualizing metrics and are integrating these lessons to create a more standard format that can be used across teams. This will allow us to leverage things that are working well, like our adoption rate metric, and combine them with the more efficiently managed processes used by other groups to ultimately achieve a higher adoption rate and a more secure GitLab.\n\n## Next up: Measuring our threat resilience\n\nNext up for us is implementing metrics around threat resilience. We want to measure how well GitLab can prevent, detect, and respond to the threats most relevant to our organization. We're building a dashboard that will help visualize this data, showing our top threat actors and a series of scores that measure how well we defend against their specific techniques.\n\nOur goal is to have this dashboard drive decisions around what Red Team operations to conduct, what defensive capabilities to improve, and in general where we should be investing time and effort across our entire security division.\n\nWe hope to consolidate our existing tools in this process and are currently evaluating solutions. We'll share more info when we've achieved some success here.\n\n## Key takeaways and how to get started\n\nIf you're looking to measure your Red Team's impact, here's what we've learned:\n\n1. Start tracking metrics early, even if they're not perfect.  \n2. Focus on actionable metrics first (like adoption rate).  \n3. Use your existing tools. We used GitLab and Tableau, but the approach works with any tracking system.  \n4. 
Collaborate across security teams to leverage existing processes when possible.\n\nWe'd love to hear about your experience with metrics in security so drop a comment below or open an issue in one of our [public projects](https://gitlab.com/gitlab-com/gl-security/security-operations/redteam/redteam-public).\n\n## Read more from GitLab's Red Team  \n- [Stealth operations: The evolution of GitLab's Red Team](https://about.gitlab.com/blog/stealth-operations-the-evolution-of-gitlabs-red-team/)  \n- [How GitLab's Red Team automates C2 testing](https://about.gitlab.com/blog/how-gitlabs-red-team-automates-c2-testing/)  \n- [How we run Red Team operations remotely](https://about.gitlab.com/blog/how-we-run-red-team-operations-remotely/)",[720,1298,9],{"slug":3522,"featured":6,"template":680},"how-gitlab-measures-red-team-impact-the-adoption-rate-metric","content:en-us:blog:how-gitlab-measures-red-team-impact-the-adoption-rate-metric.yml","How Gitlab Measures Red Team Impact The Adoption Rate Metric","en-us/blog/how-gitlab-measures-red-team-impact-the-adoption-rate-metric.yml","en-us/blog/how-gitlab-measures-red-team-impact-the-adoption-rate-metric",{"_path":3528,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3529,"content":3535,"config":3540,"_id":3542,"_type":14,"title":3543,"_source":16,"_file":3544,"_stem":3545,"_extension":19},"/en-us/blog/how-gitlab-pages-uses-the-gitlab-api",{"title":3530,"description":3531,"ogTitle":3530,"ogDescription":3531,"noIndex":6,"ogImage":3532,"ogUrl":3533,"ogSiteName":667,"ogType":668,"canonicalUrls":3533,"schema":3534},"How GitLab Pages uses the GitLab API to serve content","GitLab Pages is changing the way it reads a project's configuration to speed up booting times and slowly remove its dependency to NFS.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679634/Blog/Hero%20Images/retrosupply-jLwVAUtLOAQ-unsplash.jpg","https://about.gitlab.com/blog/how-gitlab-pages-uses-the-gitlab-api-to-serve-content","\n                
        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab Pages uses the GitLab API to serve content\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jaime Martínez\"}],\n        \"datePublished\": \"2020-08-03\"\n      }",{"title":3530,"description":3531,"authors":3536,"heroImage":3532,"date":2899,"body":3538,"category":743,"tags":3539},[3537],"Jaime Martínez","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-11-13.\n{: .alert .alert-info .note}\n\n[GitLab Pages](/stages-devops-lifecycle/pages/) allows you to create and host GitLab project websites from a user account or group for free on [GitLab.com](https://www.gitlab.com/) or on your self-managed GitLab instance.\n\nIn this post, I will explain how the [GitLab Pages daemon](https://gitlab.com/gitlab-org/gitlab-pages) obtains a domain's configuration using the\nGitLab API, specifically on [GitLab.com](https://www.gitlab.com/).\n\n## How does GitLab Pages know where to find your website files?\n\nGitLab Pages will use object storage to store the contents of your web site. You can follow the development of this new feature [here](https://gitlab.com/groups/gitlab-org/-/epics/3901).\n\nCurrently, GitLab Pages uses an NFS shared mount drive to store the contents of your website.\nYou can define the value of this path by defining the [`pages_path`](https://docs.gitlab.com/ee/administration/pages/#change-storage-path) in your `/etc/gitlab/gitlab.rb` file.\n\nWhen you deploy a website using the `pages:` keyword in your `.gitlab-ci.yml` file, a `public` path artifact must be defined, containing the files available for your website. 
This `public` artifact eventually makes its way into the NFS shared mount.\n\nWhen you deploy a website to GitLab Pages a domain will be created based on the [custom Pages domain you have configured](https://docs.gitlab.com/ee/administration/pages/#configuration). For [GitLab.com](https://www.gitlab.com/), the pages domain is `*.gitlab.io`, if you create a project named `myproject.gitlab.io` and enable HTTPS, a wildcard SSL certificate will be used.\nYou can also [setup a custom domain](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/) for your project, for example `myawesomedomain.com`.\n\nFor every project (aka domain) that is served by the Pages daemon, there must be a directory in the NFS shared mount that matches your domain name and holds its contents. For example, if we had a project named `myproject.gitlab.io`, the Pages daemon would look for your `.html` files under `/path/to/shared/pages/myproject/myproject.gitlab.io/public` directory.\nThis is how GitLab Pages serves the content published by the `pages:` keyword in your CI configuration.\n\nBefore [GitLab 12.10](/releases/2020/04/22/gitlab-12-10-released/) was released, the Pages daemon would rely on a file named `config.json` located in your project's directory in the NFS shared mount, that is `/path/to/shared/pages/myproject/myproject.gitlab.io/config.json`.\nThis file contains metadata related to your project and [custom domain names](https://docs.gitlab.com/ee/user/project/pages) you may have setup.\n\n```json\n{\n  \"domains\":[\n    {\n      \"Domain\":\"myproject.gitlab.io\"\n    },\n    {\n      \"Domain\": \"mycustomdomain.com\",\n      \"Certificate\": \"--certificate contents--\",\n      \"Key\": \"--key contents--\"\n    }\n  ],\n  \"id\":123,\n  \"access_control\":true,\n  \"https_only\":true\n}\n```\nGitLab Pages has been a very popular addition to GitLab, and the number of hosted websites on GitLab.com has increased over time. 
We are currently hosting over 251,000 websites!\nOn start-up, the Pages daemon would [traverse all directories](https://gitlab.com/gitlab-org/gitlab-pages/-/blob/v1.21.0/app.go#L448) in the NFS shared mount and load the configuration of all the deployed Pages projects into memory.\nBefore 09-19-2019, the Pages daemon would take [approximately 25 minutes to be ready to serve requests](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/252) per instance on GitLab.com.\nAfter upgrading GitLab Pages to version [`v1.9.0`](https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/185), there were some improvements in some dependencies that reduced booting time to approximately five minutes. This was great but not ideal.\n\n## GitLab API-based configuration\n\nAPI-based configuration was [introduced](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/282) in GitLab 12.10.\nWith API-based configuration, the daemon will start serving content in just a few seconds after booting.\nFor example, for a particular Pages node on GitLab.com, it usually is ready to serve content within one minute after starting.\n\nOn [GitLab.com](https://www.gitlab.com/), the Pages daemon now sources the domain configuration via an internal API endpoint\n`/api/v4/internal/pages?domain=myproject.gitlab.io`.\nThis is done on demand per domain and the configuration is cached in memory for a certain period of time to speed up serving content from that Pages node.\n\nThe response from the API is very similar to the contents of the `config.json` file:\n\n```json\n{\n    \"certificate\": \"--cert-contents--\",\n    \"key\": \"--key-contents--\",\n    \"lookup_paths\": [\n        {\n            \"access_control\": true,\n            \"https_only\": true,\n            \"prefix\": \"/\",\n            \"project_id\": 123,\n            \"source\": {\n                \"path\": \"myproject/myproject.gitlab.io/public/\",\n                \"type\": \"file\"\n            }\n        }\n    ]\n}\n```\n\nYou can 
see that the source type is `file`. This means that the Pages daemon will still serve the contents from the NFS shared mount. We are actively working on removing the NFS dependency from GitLab Pages by [updating the GitLab Pages architecture](https://gitlab.com/groups/gitlab-org/-/epics/1316).\n\nWe are planning to [transition GitLab pages to object storage instead of NFS](https://gitlab.com/groups/gitlab-org/-/epics/3901). This will essentially [enable GitLab Pages to run on Kubernetes](https://gitlab.com/gitlab-org/gitlab/-/issues/39586) in the future.\n\n**Update**:\nWe have now [rolled out zip source type on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/2808). This behavior is behind a feature flag and it's not the final implementation.\nAs of 10-22-2020 we serve about 75% of Pages projects from zip and object storage and we're getting closer to removing the NFS dependency!\n\n## Self-managed GitLab instances\n\nThe changes to the GitLab Pages architecture were piloted on GitLab.com, which is possibly the largest GitLab Pages implementation.\nOnce all the changes supporting the move to an API-based configuration are completed, they will be rolled out to self-managed customers.\nYou can find more details and the issues we faced while rolling out API-based configuration in this [issue](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/282).\n\nIf you can't wait to speed up your Pages nodes startup, we have a potential guide in this [issue description](https://gitlab.com/gitlab-org/gitlab/-/issues/28298#potential-workaround) which explains how we enabled the API on GitLab.com. 
However, this method will be removed in the near future.\n\n**Update**:\nYou can now enable API-based configuration by following [this guide](https://docs.gitlab.com/ee/administration/pages/#gitlab-api-based-configuration).\n\n## Domain source configuration and API status\n\nIn the meantime, we are working toward adding [a new configuration flag for GitLab Pages](https://gitlab.com/gitlab-org/gitlab/-/issues/217912) which will allow you to choose the domain configuration source by specifying `domain_config_source` in your `/etc/gitlab/gitlab.rb` file.\nBy default, GitLab Pages will use the `disk` source configuration the same way it is used today.\n\nIn the background, the Pages daemon will start [checking the API status](https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/304) by calling the `/api/v4/internal/pages/status` endpoint. This will help you check if the Pages daemon is ready to talk to the GitLab API, especially when you are [running Pages on a separate server](https://docs.gitlab.com/ee/administration/pages/#running-gitlab-pages-on-a-separate-server).\n\nPlease check the [GitLab Pages administration guide](https://docs.gitlab.com/ee/administration/pages/#troubleshooting) for further troubleshooting.\n\n\u003C!-- image: image-url -->\nCover image by [@RetroSupply](https://unsplash.com/@retrosupply) on [Unsplash](https://unsplash.com/photos/jLwVAUtLOAQ)\n{: .note}\n",[9,1091],{"slug":3541,"featured":6,"template":680},"how-gitlab-pages-uses-the-gitlab-api","content:en-us:blog:how-gitlab-pages-uses-the-gitlab-api.yml","How Gitlab Pages Uses The Gitlab 
Api","en-us/blog/how-gitlab-pages-uses-the-gitlab-api.yml","en-us/blog/how-gitlab-pages-uses-the-gitlab-api",{"_path":3547,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3548,"content":3554,"config":3561,"_id":3563,"_type":14,"title":3564,"_source":16,"_file":3565,"_stem":3566,"_extension":19},"/en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers",{"title":3549,"description":3550,"ogTitle":3549,"ogDescription":3550,"noIndex":6,"ogImage":3551,"ogUrl":3552,"ogSiteName":667,"ogType":668,"canonicalUrls":3552,"schema":3553},"How GitLab uses prompt guardrails to help protect customers","Learn what prompt guardrails are, how they help mitigate security risks, and what unique considerations GitLab has taken into account when implementing them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663918/Blog/Hero%20Images/aipower.jpg","https://about.gitlab.com/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab uses prompt guardrails to help protect customers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"},{\"@type\":\"Person\",\"name\":\"Roger Woo\"}],\n        \"datePublished\": \"2025-01-30\"\n      }",{"title":3549,"description":3550,"authors":3555,"heroImage":3551,"date":3558,"body":3559,"category":1839,"tags":3560},[3556,3557],"David O'Regan","Roger Woo","2025-01-30","Imagine introducing a powerful new AI tool that boosts your team's productivity — accelerating code development, resolving issues faster, and streamlining workflows. The excitement is palpable, but questions about security and compliance quickly arise. How do you manage the risk of AI inadvertently exposing sensitive data or responding to malicious prompts? 
This is where prompt guardrails play a crucial role.\n\nPrompt guardrails are structured safeguards – combining instructions, filters, and context boundaries – designed to guide AI models toward secure and reliable responses. Think of them as safety rails on a bridge, working to keep data and interactions on the correct path while supporting your organization's security protocols. In this article, we'll explore how GitLab implements these guardrails, the risks they address, and their importance for security-conscious enterprises and compliance-focused teams.\n\n## Why prompt guardrails matter\n\nAI models have transformed how organizations work, offering powerful tools to enhance productivity and innovation. However, this power comes with inherent risks. Without safeguards, AI systems may unintentionally disclose sensitive information, such as personally identifiable information (PII) or proprietary business data, or potentially act on malicious instructions. Prompt guardrails address these challenges by creating boundaries for AI models to access and process approved content, contributing to reduced risk of unintended data exposure or manipulation.\n\nFor businesses operating under strict regulations like GDPR, prompt guardrails serve as essential protection mechanisms. More importantly, they build trust among decision-makers, end users, and customers, demonstrating [GitLab's commitment to secure and responsible AI usage](https://about.gitlab.com/blog/introducing-the-gitlab-ai-transparency-center/). With prompt guardrails in place, teams can embrace AI's potential while maintaining focus on protecting their critical assets.\n\n## GitLab’s approach to prompt guardrails\n\nAt GitLab, we're [building AI features](https://about.gitlab.com/blog/categories/ai-ml/) with security, transparency, and accountability in mind because we understand these elements are critical for our enterprise customers and their auditors.  
\n\nHere’s how we’re putting that into practice.\n\n### Structured prompts and context boundaries\n\nOur system utilizes tags – like `\u003Cselected_code>` or `\u003Clog>` – to define boundaries for AI model interactions. When users ask GitLab Duo to troubleshoot a job failure, relevant logs are encapsulated in `\u003Clog>` tags. This structure guides the model to focus on specific data while working to prevent the influence from unauthorized or out-of-scope information.\n\n### Filtering and scanning tools\n\nWe employ tools like Gitleaks to scan inputs for secrets (API keys, passwords, etc.) before transmission to the AI. This filtering process helps minimize the potential for exposing confidential information or sending credentials into a model's prompt.\n\n### Role-based insights\n\nOur guardrails support focused AI discussions while contributing to customers' compliance efforts through controlled data handling and clear documentation. Organizations can adopt AI solutions designed to align with enterprise policies and risk tolerances.\n\n## Different approaches to prompt guardrails\n\nPrompt guardrails aren't one-size-fits-all solutions. Different strategies offer unique advantages, with effectiveness varying by use case and organizational requirements. GitLab combines multiple approaches to create a comprehensive system designed to balance security with usability.\n\n### System-level filters: The first line of defense\n\nSystem-level filters serve as a proactive barrier, scanning prompts for restricted keywords, patterns, or potentially harmful content. These filters work to identify and block potential risks — such as profanity, malicious commands, or unauthorized requests — before they reach the AI model.\n\nThis approach requires continuous updates to maintain effectiveness. As threats evolve, maintaining current libraries of restricted keywords and patterns becomes crucial. 
GitLab integrates these filters into its workflows to address potential risks at the earliest stage.\n\n### Model instruction tuning: Teaching the AI to stay on track\n\nInstruction tuning involves configuring AI behavior to align with specific guidelines. Our AI models are designed to reduce potentially problematic behaviors like role play, impersonation, or generating inappropriate content.\n\nThis foundation supports responses that remain informative, professional, and focused. When summarizing discussions or analyzing code, the AI maintains focus on the provided context, ideally mitigating potential deviation into unrelated topics.\n\n### Sidecar or gateway solutions: Adding a layer of protection\n\nSidecar or gateway solutions function as security checkpoints between users and AI models, processing both inputs and outputs. Like a customs officer reviewing luggage, these components help ensure only appropriate content passes through.\n\nThis approach proves particularly valuable in environments requiring strict information control, such as regulated industries or compliance-driven workflows.\n\n### Why GitLab combines these approaches\n\nNo single strategy addresses all potential risks. GitLab's hybrid approach combines system-level filters, instruction tuning, and sidecar solutions to create a robust security framework while maintaining usability.\n\nSystem-level filters provide initial screening, while instruction tuning aligns AI behavior with security standards. Sidecar solutions offer additional oversight, supporting transparency and control over data flow.\n\nThis combination creates a framework designed to support confident AI adoption while aiming to protect sensitive data and maintain compliance requirements.\n\n## Lessons learned\n\nWhile prompt guardrails help to significantly reduce risks, no system is infallible. 
Here are some lessons we have learned along the way:\n\n* Overly restrictive rules might hamper legitimate usage, frustrate developers, or slow down workflows. Striking the right balance between protecting data and providing real value is key.\n* Threat landscapes change, as do the ways people use AI. Regular updates to guardrails support alignment with current requirements and potential threats\n* At GitLab, we understand that no system can promise absolute security. Instead of making guarantees, we emphasize how our guardrails are designed to reduce risks and strengthen your defenses. This transparent approach builds trust by acknowledging that security is an ongoing process — one that we continuously refine to help support your organization’s evolving needs.\n* We gather feedback from actual user scenarios to iterate on our guardrails. Real-world insights help us refine instructions, tighten filters, and improve scanning tools over time.\n\n## Summary\n\nPrompt guardrails go beyond being a technical solution — they represent GitLab’s commitment to prioritizing AI security for our customers. By helping to reduce exposure, block harmful inputs, and ensure clear traceability of AI interactions, these guardrails aim to provide your teams with the confidence to innovate securely.\n\nWith [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our structured prompts, scanning tools, and carefully tuned instructions work together to help keep AI capabilities aligned with compliance standards and best practices. 
Whether you’re a developer, auditor, or decision-maker, these safeguards aim to enable you to embrace AI confidently while staying true to your organization’s security and compliance goals.\n\n> [Learn more about GitLab Duo and get started with a free, 60-day trial today!](https://about.gitlab.com/gitlab-duo/)",[1299,9],{"slug":3562,"featured":91,"template":680},"how-gitlab-uses-prompt-guardrails-to-help-protect-customers","content:en-us:blog:how-gitlab-uses-prompt-guardrails-to-help-protect-customers.yml","How Gitlab Uses Prompt Guardrails To Help Protect Customers","en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers.yml","en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers",{"_path":3568,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3569,"content":3575,"config":3581,"_id":3583,"_type":14,"title":3584,"_source":16,"_file":3585,"_stem":3586,"_extension":19},"/en-us/blog/how-gitlabs-customer-and-partner-focus-fuels-our-culture",{"title":3570,"description":3571,"ogTitle":3570,"ogDescription":3571,"noIndex":6,"ogImage":3572,"ogUrl":3573,"ogSiteName":667,"ogType":668,"canonicalUrls":3573,"schema":3574},"How GitLab's customer and partner focus fuels our culture","It’s an exciting time to be working in a customer- or partner-facing role at GitLab. 
Our sales team members explain why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679412/Blog/Hero%20Images/sales_blog_image_tiny.jpg","https://about.gitlab.com/blog/how-gitlabs-customer-and-partner-focus-fuels-our-culture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab's customer and partner focus fuels our culture\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jake Foster\"}],\n        \"datePublished\": \"2022-05-03\"\n      }",{"title":3570,"description":3571,"authors":3576,"heroImage":3572,"date":3578,"body":3579,"category":808,"tags":3580},[3577],"Jake Foster","2022-05-03","\n\nIt’s an exciting time to be working in a customer- or partner-facing role at GitLab. Our role with customers is to build personalized relationships and demonstrate how we can help them solve problems with a best-in-class DevOps platform. \n\nAs we grow, our customer and partner focus plays a key role in building a healthy, connected workplace culture at GitLab. So we asked some of our leaders and team members from across the Sales, Channel Partner, and Account Management teams to share their insights. Here’s what we learned.\n\n## The opportunity we have to become the leader in DevOps means hiring more top-tier talent \n\n\"We are on a journey as a company where we believe we have got this exciting market opportunity. We've got a great product that fits the market really well, and that product is an industry leader.\n\n\"We believe a lot of companies are going to buy DevOps. We need to make sure that they buy that from us and that's a hard thing. That execution requires lots of top talent. We want to keep growing, as a team and individually, to capture more market share. 
That's going to take a lot of people who are great at what they do.\"\n\n- Michael McBride (a.k.a \"McB\"), Chief Revenue Officer\n\n## Why GitLab is an ideal place to grow in a sales or channel partner role \n\n\"We have an integrated GTM with our field sales teams and channels and alliances partners. I look after both the sales organization that manages those partners and supports them and their engagement with our direct selling force, as well as the programs and enablement and functions that it takes to integrate those partners into our go-to-market. \n\n\"I believe we've got great technology, great market timing, high customer need, lots of customer value, and a great product. That makes for a pretty awesome mix from a partnering perspective. It’s lots of fun to manage partners who are aiming to grow their businesses at the same time. It’s going to make the partners very happy.\" \n\n- Michelle Hodges, VP, WW Channels \n\n\"At my previous company, we were an unknown entity and you had to really pull out all the stops to get people just to take a call with you or to test the product or buy the product. Whereas, with GitLab, I would get on calls and customers are super excited to meet people from GitLab. There were quite a few cases where people were already going to buy GitLab, but they just needed someone to help them understand what they wanted to buy. It was a salesperson's dream because you are working with people who not just love the product, but love what the company stands for. \n\n\"I remember one time I was in a coffee shop, and I had a GitLab sticker on my laptop. Someone saw that – he was a developer, he came up to me and said, 'Wow, you work at GitLab. I love that company and we use it in our team.' 
I felt a bit like a celebrity getting spotted on the streets.\" \n\n- Anthony Ogunbowale - Thomas, Named Account Executive, EMEA \n\n## What makes our culture unique \n\n\"The things in the [company handbook](/handbook/) can be kind of unbelievable to folks from the outside, when they say there's unlimited vacation time or they value results, not hours. But after being here for three years, it's true – there’s a real emphasis on valuing productivity and results. And, when people produce results, they’re rewarded.\" \n\n- Kevin Vogt, Federal Technical Account Manager\n\n\"I am not joking when I say this: This is the most successful I've ever felt in my career. And a lot of it is down to our values. \n\n\"We have a value system that's called [CREDIT](https://handbook.gitlab.com/handbook/values/): It's collaboration; results; efficiency; diversity, inclusion, and belonging; iteration; and transparency. You will find in every engagement with a GitLab team member that they work towards exhibiting those things in a really authentic, intentional manner. It makes it a great place to build relationships, but also to get your job done. It creates innovation, speed, and teamwork in a way that I haven't found before.\" \n\n- Michelle Hodges\n\n## How GitLab sets its team members up for success \n\n\"We 'dogfood' our tools. We use GitLab for everything from HR to legal – the entire company uses GitLab as a platform. \n\n\"The company is also great with training. Any time that I've ever wanted training for any kind of need in my business role, they've always provided it and reimbursed it. I just finished a month's worth of training classes on how to be a successful manager. That's my first month going into that role, trying to make sure that I can be set up for success in it.\" \n\n- Kevin Vogt\n\n\"Every conversation with the customer is a collaboration. 
In pre-sales, we have a solutions architect, who's more of a technical person, and they can help lead on answering technical questions or do demos and proof of concepts. And then, depending on how the conversation is going, we might bring on someone from Product, in relation to what the customer's looking at. Everyone in the organization works together to help the customer understand and feel comfortable with the solution.\" \n\n- Anthony Ogunbowale - Thomas\n\n\"McB, our CRO, does his own Reverse Ask Me Anything session for team members that are underrepresented in tech to understand what the experience is on the GitLab Sales team. And also what upward mobility and trajectory could look like in the company. \n\n\"I feel very supported here. I feel empowered. It's one of the first jobs I've felt where they just trust me. They tell me to take things and run with it.\" \n\n- Marcus Carter, Senior Sales Recruiter\n\n## What we’re looking for as we grow our team \n\n\"I would say, curiosity is huge. Somebody who's curious and doesn't mind asking questions. I'd say somebody who is customer-focused, somebody who's excited about our customers, and somebody who's excited about technology as a whole, and in how technology is set to advance us. It's someone who is tenacious, somebody who is unrelenting and trying to offer solutions.\" \n\n- Marcus Carter\n\n\"This is a place where we believe we have a large market in every single one of our territories. There are customers that need the right DevOps solution and our product fits with those customers really well. So that leaves one last thing, sales skill. \n\n\"That’s great for a sales rep. 
If I've got the right product and a solid market, I'm excited, because I know I can deliver the sales skill, especially if I've got the marketing support and all the other things that GitLab has.\" \n\n- Michael McBride\n\n\nIf GitLab sounds like the place for you, there’s plenty more to learn about what it’s like to be a part of our team on our [careers site](/jobs/). You can also [learn more about open roles on our team](https://boards.greenhouse.io/gitlab).\n",[9,810,1440],{"slug":3582,"featured":6,"template":680},"how-gitlabs-customer-and-partner-focus-fuels-our-culture","content:en-us:blog:how-gitlabs-customer-and-partner-focus-fuels-our-culture.yml","How Gitlabs Customer And Partner Focus Fuels Our Culture","en-us/blog/how-gitlabs-customer-and-partner-focus-fuels-our-culture.yml","en-us/blog/how-gitlabs-customer-and-partner-focus-fuels-our-culture",{"_path":3588,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3589,"content":3594,"config":3600,"_id":3602,"_type":14,"title":3603,"_source":16,"_file":3604,"_stem":3605,"_extension":19},"/en-us/blog/how-grammatech-and-gitlab-enables-better-devsecops",{"title":3590,"description":3591,"ogTitle":3590,"ogDescription":3591,"noIndex":6,"ogImage":2010,"ogUrl":3592,"ogSiteName":667,"ogType":668,"canonicalUrls":3592,"schema":3593},"How a new integration helps GitLab customers secure their code","GitLab Ultimate customers can use CodeSonar from GrammaTech for SAST and to bake protection into every stage of software development.","https://about.gitlab.com/blog/how-grammatech-and-gitlab-enables-better-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How a new integration helps GitLab customers secure their code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Simko\"}],\n        \"datePublished\": \"2021-08-20\",\n      
}",{"title":3590,"description":3591,"authors":3595,"heroImage":2010,"date":3597,"body":3598,"category":743,"tags":3599},[3596],"Christian Simko","2021-08-20","\n\nSoftware development teams that embrace agile and [DevSecOps](/topics/devsecops/) are able to code with a security-first mindset, which is essential for industries that build particularly complicated products where security is paramount, like: Aerospace and defense, automotive, industrial controls, medical devices, and more.\n\nStatic application security testing (SAST) solutions, like [CodeSonar® from GrammaTech](https://www.grammatech.com/products/source-code-analysis), integrate directly into CI/CD pipelines to bake security into every step of the software development life cycle (SDLC) – protecting your products every step of the way. Security solutions like GrammaTech pair well with an all-in-one DevOps Platform like GitLab, and allow development teams to follow best practices and industry standards to develop code that is better quality and more secure.\n\n## The GrammaTech and GitLab integration\n\nThe GrammaTech module for [GitLab Ultimate](/pricing/ultimate/) provides native SAST capabilities that scan code for defects in CI/CD pipelines and eliminates the need for any integration and maintenance by users. It allows developers to assess code continuously, avoiding costly mistakes and the duplicative work associated with waiting until the testing phase to scan for security problems.\n\nWe recognize that developers face pressure to meet aggressive deadlines for delivering new software, as rolling releases and agile development practices have developers pushing new features and code into production faster. Integrating SAST tools like CodeSonar into a DevOps Platform like GitLab Ultimate is a natural consequence to more iterative development in companies that embrace DevSecOps practices. 
CodeSonar helps developers shift security left by detecting and eliminating bugs and vulnerabilities at the earliest stages of the SDLC.\n\n## SAST with CodeSonar\n\nCodeSonar uses a unified data flow and symbolic execution analysis to examine the computation of the complete application. This approach is deeper than typical pattern-matching syntax analysis, and discovers 3-5x more defects on average.\n\nStatic analysis is unlike other software development tools (i.e., testing tools, compilers, and configuration management) becuase it can be integrated into the development process at any time with ease. CodeSonar simply attaches to your existing build environments to add analysis information to your verification process.\n\n### How does CodeSonar work?\n\nLike a compiler, CodeSonar does a \"build\" of your code using the existing build environment, but instead of creating object code, CodeSonar creates an abstract model of your entire program. From the derived model, CodeSonar's symbolic execution engine explores program paths, reasoning about program variables, and how they relate. Advanced theorem-proving technology prunes infeasible program paths from the exploration.\n\n![How CodeSonar works to secure code](https://about.gitlab.com/images/blogimages/codesonar.png){: .shadow.center}\nSee how CodeSonar secures code.\n{: .note.text-center}\n\nCheckers in CodeSonar perform static code analysis to find common defects, violations of policies, etc. Checkers operate by traversing or querying the model and looking for particular properties or patterns that indicate defects. Sophisticated symbolic execution techniques explore paths through a control-flow graph – the data structure representing paths that might be traversed by a program during its execution. 
When the path exploration notices an anomaly, a warning is generated.\n\nAn astronomical number of combinations of circumstances must be modeled and explored, so CodeSonar employs a variety of strategies to ensure scalability. For example, procedure summaries are refined and compacted during the analysis, and paths are explored in a way that minimizes paging.\n\n## Continuous Integration enabled by GitLab\n\nIntegrating CodeSonar into GitLab's pipeline is done with each [merge request (MR)](https://docs.gitlab.com/ee/user/project/merge_requests/), automatically analyzing your code and returning any vulnerabilities found via the GitLab SAST interface. Users can consult the GitLab Security Dashboard to get an overview of code security, and the Vulnerability Report gets into the details.\n\n![How CodeSonar integrates with GitLab CI pipelines](https://about.gitlab.com/images/blogimages/codesonar2.png){: .shadow.center}\nHow CodeSonar integrates with GitLab CI pipelines.\n{: .note.text-center}\n\n### Review CodeSonar warnings in GitLab Vulnerability Reports\n\nCodeSonar displays vulnerabilities right in the GitLab UI – you can review a warning, create a GitLab issue, and assign it to a developer – all in a single application. You can also dismiss vulnerabilities. CodeSonar's fingerprinting technology ensures that GitLab won't ever show dismissed vulnerabilities to you again.\n\n### Get a more detailed warning view\n\nSometimes you need more information to decide how to handle a particular warning. CodeSonar and GitLab make this easy. 
The CodeSonar warning message can be viewed directly in GitLab, and CodeSonar's detailed warning reports with annotated source code are just a click away – no copy and pasting, or searching for line numbers.\n\n![Example of GitLab vulnerability report](https://about.gitlab.com/images/blogimages/codesonar3.png){: .shadow.center}\nSee example of a GitLab vulnerability report and detailed view of warnings.\n{: .note.text-center}\n\n## How to get started\n\nA typical way to use the GitLab CI/CD pipeline is to set it up to run whenever new Git commits are submitted to a MR. When you add CodeSonar static analysis to your MR pipeline, GitLab will display the new analysis warnings on the MR page. The full set of warnings is always available on the pipeline page.\n\n### Prerequisites to use CodeSonar\n\n1. The CodeSonar integration requires a working instance of *GitLab Ultimate edition*.\n2. You must have a source code project in your GitLab instance that you wish to analyze. Set up a [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) pipeline for your project that can build your source code. This will include the configuration of one or more GitLab pipeline jobs ([more on how to configure GitLab Runners](https://docs.gitlab.com/runner/configuration/)).\n3. If you use Docker, ensure you have [Docker Engine](https://docs.docker.com/engine/install/) version 19.03.12 or later.\n4. Use the CodeSonar software package that is appropriate for your GitLab pipeline job runner's operating platform.\n5. Set up a dedicated, \"persistent\" CodeSonar Hub to coordinate and receive the results of your analysis. See your CodeSonar manual for how to set up and license a Hub.\n6. 
You will need a valid CodeSonar Hub license that is appropriate to your configuration and the CodeSonar GitLab Integration software package.\n\nRead the [instructions on installing the CodeSonar GitLab integration](https://support.grammatech.com/documentation/codesonar/integrations/gitlab/).\n\n_Christian Simko is the Director of Product Marketing at GrammaTech._\n",[720,231,9],{"slug":3601,"featured":6,"template":680},"how-grammatech-and-gitlab-enables-better-devsecops","content:en-us:blog:how-grammatech-and-gitlab-enables-better-devsecops.yml","How Grammatech And Gitlab Enables Better Devsecops","en-us/blog/how-grammatech-and-gitlab-enables-better-devsecops.yml","en-us/blog/how-grammatech-and-gitlab-enables-better-devsecops",{"_path":3607,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3608,"content":3614,"config":3619,"_id":3621,"_type":14,"title":3622,"_source":16,"_file":3623,"_stem":3624,"_extension":19},"/en-us/blog/how-holistic-ux-design-increased-gitlab-free-trial-signups",{"title":3609,"description":3610,"ogTitle":3609,"ogDescription":3610,"noIndex":6,"ogImage":3611,"ogUrl":3612,"ogSiteName":667,"ogType":668,"canonicalUrls":3612,"schema":3613},"How holistic UX design increased GitLab.com free trial signups","We boosted free trial signups by 141% by focusing on designing whole experiences instead of separate screens, small interactions, or pieces of UI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681113/Blog/Hero%20Images/user-journey-map.jpg","https://about.gitlab.com/blog/how-holistic-ux-design-increased-gitlab-free-trial-signups","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How holistic UX design increased GitLab.com free trial signups\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2020-02-27\",\n      
}",{"title":3609,"description":3610,"authors":3615,"heroImage":3611,"date":3616,"body":3617,"category":675,"tags":3618},[1897],"2020-02-27","\n\nOur new improved free trial signup flow launched in October 2019 and it reduced the number of interactions a user needed to do to complete the process from around 35 to 15. We reduced the time required to sign up and start a trial from more than five minutes to around 2.5 minutes – less than half of the original. Not surprisingly, our free trial signups soon went from around 400 per week to more than 800. This is the journey of three designers battling the complexity that comes with user experiences that weren’t designed holistically but instead grew “organically.”\n\n## Discovering the problems\n\nI started working on designing a new user onboarding experience sometime in the second quarter of 2019. The first step I took was to map the existing user journey from when users sign up for GitLab to the end of the existing onboarding. I wanted insight into the mindset of users at the moment they finished signing up for a free trial. We wanted our users to be excited and eager about the onboarding experience. I never expected to find what I did by mapping the current user journey.\n\n![GitLab’s marketing page](https://about.gitlab.com/images/blogimages/free-trial-improvements/homepage.jpg){: .large.shadow.center}\n\nI started to map the journey on the homepage of our marketing website and clicked on the big orange “Try GitLab for FREE” button. That took me to our free trial landing page where a user can choose between trialing GitLab as a SaaS (GitLab.com, hosted by GitLab) or self-managed (GitLab Self-Managed) solution. And this is where the problems started to appear.\n\n### Symptoms of a broken user experience\n\nThe two options for trialing GitLab (SaaS or Self-Managed) were presented by two tabs, one of which (Self-Managed) was active by default. 
To start a Self-Managed trial, the user had to fill in a large form right away. The SaaS option, on the other hand, only required a click on a button. My assumption here was that setting up a Self-Managed GitLab trial takes much longer so I concluded that someone who just stumbled upon GitLab is more likely to try it out as a SaaS. But on this page, few users actually noticed that option.\n\n![Original Free trial landing page](https://about.gitlab.com/images/blogimages/free-trial-improvements/landing-page.jpg){: .large.shadow.center}\n\nProblems identified:\n\n1. Self-Managed is the prioritized option but users need to fill in a large form to get started. Huge drop-off is expected even before the signup flow started.\n2. Affordance issues: the second option (the non-active one) was barely discoverable because of the way it was presented. The contrast was too low and most users missed it.\n3. Even the simpler option for starting a SaaS trial had instructions that needed to be followed. Most users missed these instructions and simply clicked on the big orange button labeled “Start Your Trial.”\n\n![Instructions](https://about.gitlab.com/images/blogimages/free-trial-improvements/instructions.jpg){: .shadow.medium.center}\n\nSigning up for a SaaS GitLab trial required users to complete two separate steps in the correct order. If step 1 wasn't completed, clicking on the “Start Your Trial” button led to a free trial signup flow that couldn’t be completed.\n\nSo a user would either have to fill in a large form and install their own instance of GitLab or follow these instructions to start a trial on GitLab.com. This reminds me of a design joke I heard ages ago but it stuck with me because it’s so true:\n> Design is like a joke: if it needs an explanation, it’s not a good joke.\n\u003C!-- ### Two separate steps to sign up for a free trial -->\n\nI didn’t know this at the time but these instructions where there for a reason. 
Users needed to complete two separate steps in two different applications to successfully sign up for a free trial – GitLab.com and a tool we call Subscription Manager. That’s why we had these instructions written on this page and that’s why the experience was completely broken if they weren’t followed. The following is the user journey map that I created:\n\n![Original user journey map](https://about.gitlab.com/images/blogimages/free-trial-improvements/original-user-journey-map.jpg){: .large.shadow.center}\n\nAltogether, it took users more than five minutes and almost 40 interactions to complete the process. When I say “interactions” I mean things like clicking a button, landing on a page, filling in a form field and similar. A user who just completed the process of signing up for a free trial of a tool should feel excited, but in our cause they most probably felt exhausted. You can [watch my video walkthrough of the experience](https://www.youtube.com/watch?v=O-zjek64d0g&feature=youtu.be) as it was at the time. Here are the key points of the experience:\n\nUsers had to sign up for a [GitLab.com](http://gitlab.com) account first. 
After this step, they were shown an “Almost finished” message as they had to confirm their email by clicking on a link in an email message that was automatically sent.\n\n![Registration form](https://about.gitlab.com/images/blogimages/free-trial-improvements/registration-form.jpg){: .large.shadow.center}\n\nProblems discovered:\n\n- We asked for a lot of information, probably too much for simply signing up.\n- We sent the newly signed-up users to their inbox – a huge source of distractions.\n\nAfter they successfully confirmed their email, we showed them the following screen – the beginning of the Free Trial signup:\n\n![Free trial sign up](https://about.gitlab.com/images/blogimages/free-trial-improvements/free-trial-signup.jpg){: .large.shadow.center}\n\nProblems identified:\n\n- Visual style was different.\n- We asked for a lot of information again. A lot of this we already had from their GitLab.com signup but we didn’t use any of it to pre-fill the form.\n\nAfter they filled in and submitted the Free Trial signup form, they were shown the following from the Subscription Manager app. This is when the users started to interact with the second app.\n\n![Subscription manager](https://about.gitlab.com/images/blogimages/free-trial-improvements/subscription-manager.jpg){: .large.shadow.center}\n\nProblems identified:\n\n- We told the users to confirm their email address again. It’s a different app for us, but for them it’s all GitLab.\n- The most obvious next step – confirming the email address – actually led to a broken flow that couldn’t be completed.\n- This screen created a lot of confusion and users didn’t know what they had to do. Sign in, register, or sign in with GitLab.com?\n\nIn the end, signing in with GitLab.com was the only way to successfully complete the process. 
It took the users to the next screen – activating their free trial.\n\n![Free trial activation](https://about.gitlab.com/images/blogimages/free-trial-improvements/free-trial-activation.jpg){: .large.shadow.center}\n\nProblems identified:\n\n- We asked the users to choose which group their free trial is for. We asked this even if the user had no groups created at all. In that case, the users could only apply the trial to their namespace so the dropdown only had one option. As this was commonly the case, this step was unneeded manual work.\n\nTo add to the confusion, we sent users to the final screen in the flow: the billing overview. The fact that we sent them to this screen wasn’t the problem, it was the information we showed.\n\n![Billing page](https://about.gitlab.com/images/blogimages/free-trial-improvements/billing.jpg){: .large.shadow.center}\n\nProblems identified:\n\n- We told the users they’re on the Gold Plan but we also showed the purchase options right below. Some users were confused about whether their trial was actually activated or not.\n\nWith all this done we could summarize what the main problems that needed to be solved were:\n\n- Two separate apps with different visual styles\n- The two apps didn’t work well with each other\n- We repeatedly asked for information users already provided\n- Poor flow of screens and unclear information architecture led to confusion. Users didn't know where they were and what they were required to do.\n\n## Fixing a broken flow\n\nOk, so at this point I learned that the flow for signing up for a free trial was disjointed and sometimes even broken. I recognized what the main reason for that was – separate applications not communicating with each other through some form of automation – as well as other UI and UX issues of course.  
To tackle the main problem, I came up with a vision: *about one minute and no more than 15 interactions required to complete the free trial signup flow.* The main outcome I wanted to achieve with this work was to improve the state of mind a user is in after successfully signing up for a free trial – *excited* instead of *exhausted*.\n\n![Users state of mind](https://about.gitlab.com/images/blogimages/free-trial-improvements/user-state-of-mind.jpg){: .large.center}\n\nBut how do we get there? Well, first of all, we need to move away from forcing users to interact with two separate applications. We do that by moving the second part of the process into the first application (GitLab.com) and making it communicate with the other application in the background. I proposed a unified signup flow that happens in one application but is adapted based on the user’s intent. Is the person an existing GitLab.com user trying to sign up for a free trial? Or are they a new user and they need to sign up for both GitLab.com and Subscription Manager accounts?\n\n![Unified flow](https://about.gitlab.com/images/blogimages/free-trial-improvements/unified-flow.jpg){: .shadow.large.center}\n\nMy colleague [Timothy Noah](/company/team/#timnoah) took over from here as he was the designer working with the team that owned this part of the product. He completed a [UX scorecard](https://gitlab.com/gitlab-org/ux-research/issues/285) and [video-documented](https://www.youtube.com/watch?v=MkTOwTxsoL8) the flow again. The result of his work was a [well structured approach](https://gitlab.com/gitlab-org/ux-research/issues/304) to breaking things down into smaller steps but with a holistic overview. 
Based on all this work, he then created a proposal of what the user journey should be like.\n\n![Proposed user journey](https://about.gitlab.com/images/blogimages/free-trial-improvements/proposed-user-journey.jpg){: .shadow.large.center}\n\nAnd translated it into actual UI, pages and their flow:\n\n![Proposed flow](https://about.gitlab.com/images/blogimages/free-trial-improvements/proposed-flow.jpg){: .large.center}\n\n[This clickable prototype](https://sketch.cloud/s/v1zJb/a/mgkLnw/play) illustrates perfectly how the new free trial signup flow should behave. It’s immediately clear that it’s much simpler and more cohesive than the original.\n\nWith that we could also improve the Free Trial landing page by removing the instructions (as we didn’t need them anymore) and balancing the two options for starting a free trial:\n\n![Improved free trial landing page](https://about.gitlab.com/images/blogimages/free-trial-improvements/improved-landing-page.jpg){: .large.shadow.center}\n\n## The new free trial signup flow launches\n\nAfter a lot of hard but well coordinated work, the new free trial signup flow launched on October 29, 2019. The results were clear in less than one week. The week before the launch, we had 466 free trial signups. In the week of the launch the number rose to 628, then to 842 in the week after. They remained well above 800 throughout November. We then saw a small dip during December (but it never fell below 600) and the climb resumed in January. 
We’re now getting more than 900 free trial signups per week.\n\n![Free trial signups chart](https://about.gitlab.com/images/blogimages/free-trial-improvements/chart.jpg){: .large.center}\n\nI quickly crunched the numbers and came to the following conclusion:\n\n> Average signups per week before launch: **330** \u003Cbr>\n> Average signups per week after launch: **794** \u003Cbr>\n> Which results in an improvement of **141%**\n\nSo we more than doubled the amount of free trial signups, but what exactly led to these results? Another colleague, [Kevin Comoli](/company/team/#kcomoli), recently did a follow-up [UX scorecard](https://gitlab.com/gitlab-org/growth/product/issues/166) to rescore the experience. His findings? It now takes around 17 interactions (instead of the original 37) and around 2.5 minutes to complete the process. So we reduced both by more than half and that’s why we’re seeing such an increase in completed signups. Take a look at the latest version of the [user journey](https://app.mural.co/t/gitlab2474/m/gitlab2474/1572360181709/cb4df793a4d4b98395b8c98c6510d21b4a2d6747) mapped by Kevin.\n\n## Organically grown versus holistically designed experiences\n\nExperiences are either intentionally and holistically designed by someone or they get designed by what I call “organically grown” smaller parts of the experience. It’s like cultivating a garden: we start off by planting a few flowers and bushes but leave some empty space around them. Eventually, if we don’t do anything, this empty space will get overgrown with weeds. Our flowers and bushes will also grow in an uncontrolled way. So until a gardener comes around and tidies everything up, our garden will be a mess. It’s the same with our digital products – if a designer with a holistic overview isn’t involved, different parts of our products grow into a mess that doesn’t work as a whole. The *holistic overview* is the key here. 
It’s not enough to have designers involved if all they do is design separate screens instead of complete experiences. We need to look at how things work as a whole. That’s when designers, and the teams they work with, are most successful.\n\n## Where do we go from here?\n\nWe’re thrilled about the improvements we have already achieved but we also feel there’s a lot more we can do. I personally would still like to see the time required to complete the process be reduced to around a minute. As part of his UX scorecard, Kevin also came up with additional [recommendations for improvements](https://gitlab.com/groups/gitlab-org/growth/-/epics/7). There, he talks about trimming down the information shown in the process, improving the entry points to the flow and tailoring its steps based on the user type. We’re all looking forward to  these improvements being implemented.\n\nPhoto by [Startaê](https://unsplash.com/@startaeteam?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) Team on [Unsplash](https://unsplash.com/s/photos/post-it?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[700,1698,1152,9],{"slug":3620,"featured":6,"template":680},"how-holistic-ux-design-increased-gitlab-free-trial-signups","content:en-us:blog:how-holistic-ux-design-increased-gitlab-free-trial-signups.yml","How Holistic Ux Design Increased Gitlab Free Trial Signups","en-us/blog/how-holistic-ux-design-increased-gitlab-free-trial-signups.yml","en-us/blog/how-holistic-ux-design-increased-gitlab-free-trial-signups",{"_path":3626,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3627,"content":3633,"config":3639,"_id":3641,"_type":14,"title":3642,"_source":16,"_file":3643,"_stem":3644,"_extension":19},"/en-us/blog/how-i-transitioned-from-frontend-to-ux",{"title":3628,"description":3629,"ogTitle":3628,"ogDescription":3629,"noIndex":6,"ogImage":3630,"ogUrl":3631,"ogSiteName":667,"ogType":668,"canonicalUrls":3631,"schema":3632},"How I transitioned from 
frontend to UX","One GitLab team-member shares how switching from a frontend engineer to a UX designer has been a rewarding experience.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679015/Blog/Hero%20Images/frontendux.jpg","https://about.gitlab.com/blog/how-i-transitioned-from-frontend-to-ux","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How I transitioned from frontend to UX\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Annabel Dunstone Gray\"}],\n        \"datePublished\": \"2018-10-05\",\n      }",{"title":3628,"description":3629,"authors":3634,"heroImage":3630,"date":3636,"body":3637,"category":808,"tags":3638},[3635],"Annabel Dunstone Gray","2018-10-05","\nWhen I joined GitLab over two and a half years ago as a frontend engineer, I brought with\nme a background in photography and an interest in art and design. In my last year\nof university, I worked at an art museum, and I’ve always gravitated towards the\nmore design-y aspects of frontend. For each release, my assigned deliverables\nwere usually focused on redesigns, and while I enjoy that type of work, what I\nreally wanted to do was to help shape the look and feel of GitLab, rather than\nimplementing the designs of others.\n\n## Making the first move\n\nAt GitLab, we're lucky to have the opportunity to [transfer](/handbook/people-group/promotions-transfers/#department-transfers)\nto a different department, if our interests or career goals change. I spoke with\nmy frontend manager about my passions and shared my desire to start learning and\nworking with the UX team. 
I then spoke with [Sarrah](/company/team/#SVesselov),\nthe UX Manager, about the next steps, and I started working through online\ntutorials, getting up to speed on Sketch, and attending the UX weekly calls.\nOnce I acquired the necessary technical skills, I joined the [Plan](/direction/#plan)\nteam, which is focused mostly on the prioritization of ideas, allocation of\nresources, scheduling, and tracking. It’s an area I’m really excited about, and\nwe’re working on some incredibly useful management features (like [improved issue boards](https://gitlab.com/gitlab-org/gitlab-ce/issues/48847), [sub-epics](https://gitlab.com/gitlab-org/gitlab-ee/issues/7327), and [value stream management](https://gitlab.com/groups/gitlab-org/-/epics/229)) that will help make\nGitLab an even more powerful tool.\n\nAs a frontend engineer, I was fortunate to have developed many transferable\nskills which helped me tackle this new challenge. Attention to detail is one\nskill that has been particularly useful when working on a new feature. Since\nI’m new to UX, I’ve found it really helpful to have a technical background,\nespecially considering that GitLab is such a technical product.\n\n## Advice to others\n\n![Me and my daughter attending a frontend meeting.](https://about.gitlab.com/images/blogimages/annabelandbaby.jpg){:.shadow.small.right.wrap-text}\n\nIf you’re interested in making a similar transition, I encourage you to speak\nwith your manager. I wish I’d done so sooner. I discussed my interests early\nlast year, but after having a baby, I had this idea that I\nshould stay in my current role, as I would never have time to learn a whole new\npractice. While I definitely don’t have any free time (I don’t know if you’ve\nheard – babies are quite time consuming), I’m so happy to be on the UX team, even\nthough I have a lot of catching up to do. Everyone in both frontend and UX has\nbeen incredibly supportive of my switching teams, and I’m learning a lot as I go\nalong. 
For now, I’ve got the best of both worlds – 50 percent of my time is focused on\nstyling-related frontend issues and reviewing the CSS in merge requests, while\nthe other 50 percent is working on UX issues.\n\nBy the way, we're hiring for loads of positions, across the company – [check out our current job openings](/jobs/).\n\n[Cover image](https://unsplash.com/photos/aLGiPJ4XRO4) by [Bharath](https://unsplash.com/@xen0m0rph), licensed under [CC X](https://unsplash.com/license).\n{: .note}\n",[700,3138,810,9],{"slug":3640,"featured":6,"template":680},"how-i-transitioned-from-frontend-to-ux","content:en-us:blog:how-i-transitioned-from-frontend-to-ux.yml","How I Transitioned From Frontend To Ux","en-us/blog/how-i-transitioned-from-frontend-to-ux.yml","en-us/blog/how-i-transitioned-from-frontend-to-ux",{"_path":3646,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3647,"content":3652,"config":3658,"_id":3660,"_type":14,"title":3661,"_source":16,"_file":3662,"_stem":3663,"_extension":19},"/en-us/blog/how-remote-work-at-gitlab-enables-location-independence",{"title":3648,"description":3649,"ogTitle":3648,"ogDescription":3649,"noIndex":6,"ogImage":1669,"ogUrl":3650,"ogSiteName":667,"ogType":668,"canonicalUrls":3650,"schema":3651},"How I work from anywhere (with good internet)","Sarah Daily, digital marketing programs manager and remote work advocate, shares how all-remote work at GitLab has enabled her life on the road.","https://about.gitlab.com/blog/how-remote-work-at-gitlab-enables-location-independence","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How I work from anywhere (with good internet)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Daily\"},{\"@type\":\"Person\",\"name\":\"Betsy Church\"}],\n        \"datePublished\": \"2019-06-25\",\n      
}",{"title":3648,"description":3649,"authors":3653,"heroImage":1669,"date":3655,"body":3656,"category":808,"tags":3657},[3654,1267],"Sarah Daily","2019-06-25","\n\nWe're committed to [all-remote](/company/culture/all-remote/) work at GitLab – our whole work\nphilosophy is designed around it. So we're always happy to share when one of our team members\nis taking full advantage of the flexibility that remote work affords. We chatted with [Sarah Daily](/company/team/#sdaily) about\nher life on the road:\n\n### What’s your role at GitLab, and why did you want to join the team?\n\nI’m a [digital marketing programs manager](https://handbook.gitlab.com/job-families/marketing/online-growth-manager/index.html) focusing on conversion rate optimization and analysis for our email programs and website. Previously, I was a digital marketing manager for a non-profit organization in the education industry.\n\nI'm a remote work advocate and knew about some of the companies that are 100% remote (GitLab being prominent among them).\n\nThough I had a remote job at the time I applied to GitLab, I knew eventually my passion for technology and software development would lead me elsewhere. I decided to seek GitLab directly to see if they had any open positions in their marketing department. As fate would have it, they did, so I applied immediately.\n\nThe more I learned about the company and culture, the more I fell in love. GitLab is a model for how companies should implement remote work. The culture and values are so deeply integrated in how everyone works and behaves. Everything we do and how we work is centered around being a global workforce and allows us to move at the speed of innovation.\n\n### Tell us about your traveling home office and when you started life as a digital nomad.\n\nThree years ago, my partner and I were living in an 800-square-foot apartment with daily commute jobs. 
We no longer wanted to live where we were, but we didn’t want to choose a random place to move to without knowing whether we were actually going to like it there.\n\nWe needed to be able to visit family and friends with little hassle, and if we lived over a 1,000 miles away then that was going to be a considerable effort and cost. Before we could make any decisions, I needed the ability to work remotely and I ended up finding a remote job with a non-profit organization that had a hybrid remote work model.\n\nOne night, my partner came home from work and made the suggestion to live in an RV. It would be cheaper to live, we could travel and live anywhere we wanted* for as long as we wanted, and we would be able to visit family and friends, all while living in the comfort of our own home.\n\n![Sarah's truck and trailer](https://about.gitlab.com/images/blogimages/sdaily-truck-and-trailer.jpg){: .shadow.medium.center}\nSarah's truck and trailer\n{: .note.text-center}\n\nAfter researching blogs, Facebook groups, and other websites, we realized not only was it actually possible, but that thousands of other people, couples, and families were doing this and had been doing it for years.\n\nBut before we could start the process, we had to downsize a lot.\n\nWe sold a car, all our furniture, and gave the rest away to Goodwill or family and friends. In March 2016, we moved the rest of our belongings into a less than 200-square-foot space and hit the road. We’ve been all over the west coast of the US and Canada.\n\nOur rig is a 40-foot travel trailer that we haul with our truck. After living and traveling in it for three years, it actually has more space than we need.\n\nMore than anything, we love the freedom of being able to pick up and leave for a new location, all while being in our home. 
We’ll likely continue to do this for the foreseeable future.\n\n*Criteria: Has to have good internet and an airport nearby.\n{: .note}\n\n### How has working for GitLab enabled you to chase your passion for travel?\n\nThough we’ve been full-time traveling for over three years, GitLab makes this even easier because of the focus on asynchronous work. While some companies allow their employees to work remotely, it isn’t always flexible.\n\nAt my previous job, I was expected to work at least partially in a specific time zone. This is because there was a central HQ and only some employees worked remotely full time. This created a separation and isolation for remote employees. It made us feel like we weren’t always involved in meetings and conversations that happened at HQ.\n\nWith the asynchronous model, I don't have to worry about when I'm working because all my colleagues live in different time zones. This gives me the freedom to design my day around my peak productive hours and also have time to take care of general life stuff (appointments, house chores, etc.)\n\n![Sarah fishing in Grand Teton National Park, Wyoming](https://about.gitlab.com/images/blogimages/sarah-fishing-grand-teton-national-park-wy.jpg){: .shadow.medium.center}\nSarah fishing in Grand Teton National Park, Wyoming\n{: .note.text-center}\n\n### What makes GitLab unique?\n\nIt is so refreshing to work at GitLab. The culture really enables you to be the best version of yourself both as an employee and a human being outside of work. 
Everyone here fully embraces our ideals and values and it makes contributing a pleasure.\n\n>Everything we do and how we work is centered around being a global workforce and allows us to move at the speed of innovation\n\nYou really feel like you make a difference each day, [no matter how small or boring](https://handbook.gitlab.com/handbook/values/#boring-solutions).\n\nBut I think the biggest difference between GitLab and other companies I’ve worked for is the [transparency](https://handbook.gitlab.com/handbook/values/#transparency). By being transparent with our employees, customers, and community, we enable everyone to fall in love with the product and vision, and contribute to making it better every day.\n\nIt truly becomes a shared goal and I think that’s something that is missing from most company cultures. If you cannot enable everyone to have a say through transparency, you bottleneck the entire company for everyone.\n\n\n\nLearn more about [all-remote](/company/culture/all-remote/) work and [how it works at GitLab](/company/culture/all-remote/tips/#how-it-works-at-gitlab).\n\nWant to join the team? 
[Browse our vacancies](/jobs/).\n",[677,9,832],{"slug":3659,"featured":6,"template":680},"how-remote-work-at-gitlab-enables-location-independence","content:en-us:blog:how-remote-work-at-gitlab-enables-location-independence.yml","How Remote Work At Gitlab Enables Location Independence","en-us/blog/how-remote-work-at-gitlab-enables-location-independence.yml","en-us/blog/how-remote-work-at-gitlab-enables-location-independence",{"_path":3665,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3666,"content":3671,"config":3678,"_id":3680,"_type":14,"title":3681,"_source":16,"_file":3682,"_stem":3683,"_extension":19},"/en-us/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform",{"title":3667,"description":3668,"ogTitle":3667,"ogDescription":3668,"noIndex":6,"ogImage":2010,"ogUrl":3669,"ogSiteName":667,"ogType":668,"canonicalUrls":3669,"schema":3670},"How ten steps over ten years led to the DevOps Platform","It's been ten years since the first commit to GitLab! Here's a look at ten critical choices that shaped us.","https://about.gitlab.com/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How ten steps over ten years led to the DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2021-10-11\",\n      }",{"title":3667,"description":3668,"authors":3672,"heroImage":2010,"date":3674,"body":3675,"category":787,"tags":3676},[3673],"Brendan O'Leary","2021-10-11","\nThe first commit to GitLab (!!) was 10 years ago. Today, it’s an entirely different world: DevOps is increasingly mainstream and there's a DevOps platform revolution.\n\nWe didn’t have a crystal ball back then, but we did try to create a product, a culture and a company that reflected what we thought mattered most. Here’s a look back at 10 key decisions we made that still have impact:\n\n1. 
Work in parallel: When we started, it was clear the waterfall method of software development - where one stage waited on another stage and nothing happened independently - slowed everything. We decided right from the beginning that a “work in parallel” philosophy would be fundamental to our culture and our behaviors. Also, such a philosophy naturally supported everything else we did, including bringing CI and CD together and operating as an all-remote company. Working in parallel is also vital to success with DevOps.\n\n2. CI, meet git: To merge dev and ops you have to merge development and operations. We [weren’t really sure](/blog/gitlab-hero-devops-platform/) bringing CI together with a git repository was the right step to take, but we tried it and [it worked](/blog/beginner-guide-ci-cd/). Now, developers expect CI to be perfectly integrated into their daily work, and, more and more, they are using a DevOps platform to centralize CI and CD.\n\n3. Cloud native: We’ve been talking about Kubernetes and the options made possible by cloud-native development since [2017](/blog/containers-kubernetes-basics/). We’re true believers in supporting the ability to embrace cloud-native technology and patterns.  The concept of cloud native enables teams to deliver better software faster, break down their applications into microservices and focus engineering time on delivering value to their customers - not on maintaining brittle infrastructure.\n\n4. The mighty merge request: We doubled down on the idea of a merge request, making it the hub of absolutely everything. Merge requests are not only the gateway to production, but all the other critical steps, such as security checks, which can be found in there as well. Plus, the merge request serves as a living record of changes and is essential for [better code review](/blog/iteration-and-code-review/).\n\n5. 
Developer-first security: For developers to have ownership of security, they need scanning early in the process and results in their workflow. That’s why [developer-first security](/topics/devsecops/what-is-developer-first-security/) is at the heart of our DevOps Platform.\n\n6. A complete definition of security: Security isn’t a “one and done” effort and our DevOps Platform enables us to offer a broad spectrum of security scans that goes far beyond just SAST and DAST. From scanning for dependencies or looking at containers, we cover all the security bases in a single platform.\n\n7. All remote, all the time: With no corporate headquarters and employees in 65 countries and regions (as of October 2021), we’re [all remote](https://handbook.gitlab.com/handbook/company/culture/all-remote/guide/) and proud of it. This decision transformed into a corporate value that has influenced our choices and behaviors. \n\n8. Asynchronous communication: A natural result of being remote, [asynchronous communication](https://handbook.gitlab.com/handbook/company/culture/all-remote/asynchronous/) is something we take seriously. We’re a [“handbook first”](https://handbook.gitlab.com/handbook/company/culture/all-remote/handbook-first/) organization, meaning we write everything down so information is as self-service as possible. We also carefully consider what time is spent in meetings, limiting their frequency and regularly asking ourselves if “asynchronous” is better. This has allowed us to successfully have employees in nearly every time zone around the world and follow the working in parallel philosophy.\n\n9. Visibility: Planning is critical, but it’s equally important to pair it with visibility so everyone knows what’s happening and where it’s happening. Giving context for the original plan to all team members throughout the DevOps lifecycle, how the plan has changed, and what the implementation looks like in the end is a critical advantage to a single DevOps platform.  
Without this, time is wasted trying to update multiple systems with issue status, or having conflicting information in independent tools. \n\n10. Measure the results: We firmly believe it’s important to know how the stages of the SDLC are going, in detail. After all, if you can’t measure your results, how can you know things are moving in the right direction? Many DevOps teams don’t or can’t measure, but that can make it difficult to convince management of the value of the methodology. A DevOps platform makes measurement easy.\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n- [It's time to build more accessible software. 
A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n",[3677,1440,9],"DevOps platform",{"slug":3679,"featured":6,"template":680},"how-ten-steps-over-ten-years-led-to-the-devops-platform","content:en-us:blog:how-ten-steps-over-ten-years-led-to-the-devops-platform.yml","How Ten Steps Over Ten Years Led To The Devops Platform","en-us/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform.yml","en-us/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform",{"_path":3685,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3686,"content":3692,"config":3697,"_id":3699,"_type":14,"title":3700,"_source":16,"_file":3701,"_stem":3702,"_extension":19},"/en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier",{"title":3687,"description":3688,"ogTitle":3687,"ogDescription":3688,"noIndex":6,"ogImage":3689,"ogUrl":3690,"ogSiteName":667,"ogType":668,"canonicalUrls":3690,"schema":3691},"It's time to build more accessible software. A DevOps platform can help","Shifting accessibility left can make building accessible products simpler and more efficient. A DevOps platform makes it easier to customize and adjust priorities to suit your business needs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667281/Blog/Hero%20Images/accessibility.jpg","https://about.gitlab.com/blog/how-the-devops-platform-makes-building-accessible-software-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to build more accessible software. 
A DevOps platform can help\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-09-21\",\n      }",{"title":3687,"description":3688,"authors":3693,"heroImage":3689,"date":3694,"body":3695,"category":1340,"tags":3696},[672],"2021-09-21","\n\nThe earlier a feature or process is introduced in the multi-step software development lifecycle (SDLC), the more likely it is to be fully integrated into the product. \n\nIt's well documented how security can [shift left using a DevOps platform](/blog/devops-platform-supply-chain-attacks/), so it's time to make the case that accessbility needs to be thought about earlier (and, clearly, a DevOps platform can facilitate that too). Although there are laws that require applications to meet certain accessibility requirements, which [opens an application up to a broader user base](/blog/how-the-open-source-community-can-build-more-accessible-products/), rarely is accessibility considered a core product requirement. Instead, it is just a test tacked on at the end instead of being built into the DevOps platform process.\n\n**[Learn more about [how the open source development community helps build accessible software](/blog/how-the-open-source-community-can-build-more-accessible-products/)]**\n\n\"The problem really is that accessibility is not usually explicitly defined as a problem,\" said Segun Ola, a frontend web developer at engineering talent finder Andela, during a presentation at accessibility conference [axe-con](https://www.deque.com/axe-con/). 
\"Most of the time, developers go through the product lifecycle and we identify all the other problems with a product or all of the things we want to solve and ignore accessibility for the greater part.\"\n\n## Accessibility in software development: It starts with education\n\nOftentimes, omissions are unintentional and have more to do with a lack of awareness around why accessibility in software development is so important for many people living with disabilities and a key driver of business value. After all, the more accessible your product, the more users can benefit from it.\n\n\"I have met software engineers and designers who did not even know that there's a thing called a screen reader,\" Ola said. \"Just last week, I was reviewing some code and explaining why the code needed to be refactored. A junior engineer asked me ‘what is a screen reader?’ So I had to get on a call with him and show him how screen readers work. And then he asks me, ‘What's the point of a screen reader?’ And I had to tell him: ‘Oh yeah there are people who can't see the way you and I see.’\"\n\nThis is just one example of why having empathy and education around accessibility so important, says [Taurie Davis](/company/team/#tauriedavis), product design manager on Ecosystems at GitLab. Earlier in 2021, the GitLab UX team set a [goal to become a department of accessibility experts and advocates at GitLab](https://gitlab.com/groups/gitlab-org/-/epics/5235) by completing a 26-hour training at [Deque University](https://dequeuniversity.com/) on accessibility in software development.\n\n## Ignoring accessibility? Expect more technical debt\n\nSometimes software companies will see investment in accessibility components for a product as expensive and/or as a trade-off for innovation. 
Development teams that wait until the end of the SDLC to think about accessibility are more likely to have coded components that are inaccessible, only to have to go back and rework them to suit legal accessibility standards. This process can lead to an immense amount of technical debt.\n\n**Take a deep dive into [all aspects of the DevOps platform](/solutions/devops-platform/)**\n\n\"Once a team does start to become educated about accessibility and they have the empathy and have the drive to make the change and start shifting accessibility left it's easy to see all of the debt that you've accrued around accessibility,\" says Taurie. \"It can be really expensive to get yourself out of that debt.\" Taurie points to examples such as having to go back to change variables for color contrast, and ensuring that filtering and tab reordering can be done in a way that screen readers understand it.\n\n\"There are just so many different aspects and elements that could cause you to go back and just rewrite how the entire feature was originally developed and that can affect every aspect of your product,\" she adds.\n\n## Other barriers to implementing accessibility earlier\n\nFor UX designers like [Jeremy Elder](/company/team/#jeldergl), staff product designer on Ecosystems at GitLab, and Taurie, the typical workflow is about testing artifacts and responding to customer feedback, as opposed to thinking proactively about how someone might use the product.\n\n\"It’s more of a softer skill to think through a lot of those abstract ideas and what-ifs upfront rather than just saying, ‘Hey, we need this widget to do XYZ,’\" says Jeremy. \"Instead of asking questions like ‘how might somebody want to use this? How does it fit in their workflow?’. 
That is more inclusive thinking that helps you to do that, but it's harder and not as common.\"\n\n## Building accessible software isn’t just ethical, it drives business value\n\nOftentimes accessibility in software development is framed around building products to better serve people living with disabilities. While this is essential and ethical, accessibility can also be about building software products that can easily adapt to a user’s workflow.\n\n\"It’s more rigor around understanding workflows and how somebody is wanting to use it and less about focusing necessarily on a disability per se, or an outcome,\" says Jeremy. \"You want to think about personas or jobs to be done, not just think about the ultimate task, but how somebody is achieving that task.\"\n\n**[Ten key features](/topics/devops-platform/) of a DevOps platform**\n\nProducts that are customizable and adaptable are more likely to pique the interest of clients who have specific needs (e.g., a screenreader) or workflow preferences (e.g., using a particular type of keyboard).\n\n## What are the solutions?\n\nThe simplest solution to building more accessible software solutions is to think about accessibility at the beginning of the SDLC, rather than waiting until the end. Companies that use a complete DevOps platform like GitLab will find it simpler to take iterative steps toward shifting accessibility left. Need an example? Make accessibility part of the requirements a dev team needs to complete before a particular feature can be considered \"done.\" One way to do this would be to update issue templates and MR templates to ensure an accessibility step is part of the checklist.\n\nWhether it’s security or accessibility, shifting something left is about bringing the conversation to the beginning of the SDLC, something made much more straightforward with a DevOps platform. 
When it comes to accessibility, the more accessible the product is, the broader the pool of users (and future customers) can benefit.\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n",[1440,9,745],{"slug":3698,"featured":6,"template":680},"how-the-devops-platform-makes-building-accessible-software-easier","content:en-us:blog:how-the-devops-platform-makes-building-accessible-software-easier.yml","How The Devops Platform Makes Building Accessible Software Easier","en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier.yml","en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier",{"_path":3704,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3705,"content":3711,"config":3716,"_id":3718,"_type":14,"title":3719,"_source":16,"_file":3720,"_stem":3721,"_extension":19},"/en-us/blog/how-the-security-culture-committee-is-strengthening-gitlab-values",{"title":3706,"description":3707,"ogTitle":3706,"ogDescription":3707,"noIndex":6,"ogImage":3708,"ogUrl":3709,"ogSiteName":667,"ogType":668,"canonicalUrls":3709,"schema":3710},"How the Security Culture Committee is strengthening GitLab values","Learn how this group of team members works to preserve and reinforce GitLab values in the Security department and 
beyond.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670879/Blog/Hero%20Images/Sec-Culture-Committee-blog.png","https://about.gitlab.com/blog/how-the-security-culture-committee-is-strengthening-gitlab-values","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the Security Culture Committee is strengthening GitLab values\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2021-05-07\",\n      }",{"title":3706,"description":3707,"authors":3712,"heroImage":3708,"date":3713,"body":3714,"category":720,"tags":3715},[1010],"2021-05-07","\n\nTransparency is a core value here at GitLab and we strive to be [\"open about as many things as possible\"](https://handbook.gitlab.com/handbook/values/#transparency), but as any security practitioner knows, this can, at times, feel as though it conflicts with the work we do within security. That feeling of conflict is one of the main drivers behind the creation of a [Security Culture Committee](/handbook/security/security-culture.html) here at GitLab. The other is to ensure the Security department, and all of GitLab, lives up to our [company values](https://handbook.gitlab.com/handbook/values/), especially as we continue to scale. The [mission and goals of the Security Culture Committee](/handbook/security/security-culture.html#mission-statement) were developed by the committee members themselves, with an eye on our GitLab values and also to ensure representation of our fellow team members.\n\n## How does the committee work?\n\nOur first group of team members, five of us, were peer nominated (thanks, team 😉) back in August of 2020 and include: [Dominic Couture](/company/team/#dcouture), [Mark Loveless](/company/team/#mloveless), [Joern Schneeweisz](/company/team/#joernchen), [Heather Simpson](/company/team/#heather), and [Steve Truong](/company/team/#sttruong). 
We meet monthly via Zoom (meetings are recorded and viewable internally for GitLab team members) to discuss candidate initiatives or process improvements where GitLab values could be better represented. Between meetings, we work async through GitLab issues and in a dedicated, public-to-GitLab Slack channel (#security-culture).\n\nFellow team members can bring suggestions for initiatives we should tackle via #security-culture channel, an issue or a Slack DM if that's more comfortable. Candidate initiatives are anything where [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration), [results](https://handbook.gitlab.com/handbook/values/#results), [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency), [diversity, inclusion & belonging](https://handbook.gitlab.com/handbook/values/#diversity-inclusion), [iteration](https://handbook.gitlab.com/handbook/values/#iteration) and/or [transparency](https://handbook.gitlab.com/handbook/values/#transparency) (all GitLab values), could be strengthened and improved.\n\n## Where has the committee focused our efforts so far?\n\nOne of the first things we tried to do was determine how we would define \"success\". We weren't sure, so reached out to the Security department via an anonymous feedback form asking the following questions:\n\n* Do you think the Security Culture Committee is strengthening the GitLab values within the Security department?\n* Do you think the Security Culture Committee should continue its efforts for at least another quarter?\n* Do you have anything to share what the committee could do in the future? Any ideas for opportunities are welcome.\n* Anything else you'd like to mention to the committee?\n\nFor the first two questions, team members had to rate their agreement with the statements on a scale of one (strongly disagree) to five (strongly agree) and 91% of answers were four or above. 
The other two questions generated interesting ideas to improve transparency in the department and better ways to communicate important news and initiatives across GitLab through Slack updates and entries in our Engineering department's week-in-review newsletter. There's definitely opportunity to improve and strengthen communication within GitLab around Security work and initiatives, and the value these efforts bring to the rest of the organization\n\n### Public profiles for transparency and collaboration\n\nAnother early initiative for our group was to encourage more GitLab team members to adopt public profiles to increase transparency across the company. The use of open, public profiles enables company-wide visibility into projects, plans, statuses, and updates. Public profiles ensure efficiency and fosters greater collaboration when there is visibility into the ongoing efforts of GitLab teams and team members. Public profiles also allow any visitor to see the work team members are doing in public projects. 
See Heather's profile: [https://gitlab.com/heather](https://gitlab.com/heather) as an example.\n\n![Screenshot of Heather Simpson's public GitLab profile](https://about.gitlab.com/images/blogimages/sec-culture-blog/heathersimpson_publicprofile.png){: .shadow.medium.center}\nPublic profiles foster collaboration through greater visibility into the work GitLab team members are doing.\n{: .note.text-center}\n\n\nTo encourage public profile use, we held a Slack campaign where we communicated the value of public profiles and shared our progress toward the goal of making all GitLab profiles public by default.\n\n![Public GitLab profiles Slack campaign](https://about.gitlab.com/images/blogimages/sec-culture-blog/public_profile_msg.png){: .shadow.medium.center}\nAn example of our internal Slack campaign to encourage GitLab team members to switch their profiles from private to public.\n{: .note.text-center}\n\nWe also [added language](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/60262/diffs) to the [values page of the GitLab Handbook](https://handbook.gitlab.com/handbook/values/#transparency) encouraging the use of public profiles:\n\n> In line with our value of transparency and being public by default, all GitLab team member [profiles](https://docs.gitlab.com/ee/user/profile/#user-profile) should be public. Public profiles also enable broader collaboration and efficiencies between teams. To do so, please make sure that the checkbox under the [Private profile](https://docs.gitlab.com/ee/user/profile/#private-profile) option is unchecked in your profile settings. 
If you do not feel comfortable with your full name or location on your profile, please change it to what feels appropriate to you as these are displayed even on private profiles.\n\nAnd we added [clarification](https://gitlab.com/gitlab-com/people-group/people-operations/employment-templates/-/merge_requests/465/diffs) to our [onboarding template](https://gitlab.com/gitlab-com/people-group/people-operations/employment-templates/-/blob/c80404ffc53b143bfc393ab69b7ce482de3efdad/.gitlab/issue_templates/onboarding.md#L422) around why we use public profiles to ensure new team members understand how they contribute to GitLab's value of transparency and being [public by default](https://handbook.gitlab.com/handbook/values/#public-by-default).\n\nOur Security Culture Committee will continue to revisit this topic and educate team members on the value of public profiles, but we're proud of our team members commitment to transparency and the results we've achieved, together, to-date:\n\n**As of May 5, 2021:** 🎉\n* All of GitLab: 2.18% private profiles (28 out of 1307)\n* Security department: 2.22% private profiles (1 out of 48)\n\n### Increase transparency in department leadership meetings\n\nBeyond ensuring our GitLab profiles are public, the Security Culture Committee, in partnership with [Security department](/handbook/security/#security-department) leadership, has also advocated for several department and sub-department meeting notes and recordings to be made available internally. By making notes and recordings available, all team members can stay informed about what's going on at the Security leadership level and follow meeting notes and recordings [asynchronously](https://handbook.gitlab.com/handbook/values/#bias-towards-asynchronous-communication). 
Besides providing more transparency, this also supports our collaboration and results values, as information is made available for all to read and contribute to.\n\n### Strengthen the employee experience\n\nOn a bi-annual cadence, GitLab conducts an organization-wide [Team Member Engagement Survey](/handbook/people-group/engagement/) to give team members an opportunity to provide feedback related to their experience within GitLab across multiple elements, including culture. The results from this survey are aggregated by department and shared with department heads.\n\nGitLab VP of Security [Johnathan Hunt](/company/team/#JohnathanHunt), engaged the culture committee to dive deeper into the Security department specific results from the Team Member Engagement Survey and help identify areas for improvement. After reviewing results, the committee outlined four focus areas where we could strengthen employee experience across the Security department based on survey results:\n\n* I believe there are good career opportunities at GitLab\n* I have access to the L&D I need to do my job well\n* GitLab is in a position to really succeed over the next three years\n* I have confidence in senior leaders and execs at GitLab\n\n**The culture committee established various channels for Security team members to share their feedback:**\n\n* Anonymous response to a Security department specific survey (delivered via Google forms)\n* Survey response provided to their manager in a 1:1 session where feedback was then summarized, anonymized, and provided to the committee\n* 1:1 feedback directly to a member of the culture committee over a coffee chat\n\n**About 62% of the Security department provided feedback, not including aggregated feedback that was provided to managers in 1:1 conversations. 
As part of the survey, we asked Security team members to:**\n\n* Prioritize and rank the four focus areas mentioned above\n* Provide recommendations for improvement within each focus area\n* Supply any additional feedback and recommendations they wanted to share\n\nOnce all feedback was gathered, the culture committee worked to consolidate and anonymize the data to ensure that specific team members could not be identified based on language used in their feedback. The next steps included sharing the qualitative survey data and summarized feedback with the entire team, and making recommendations for action, based on survey data, to senior leadership. Security leadership took the recommendations from the [top three focus areas and formalized an OKR](https://gitlab.com/groups/gitlab-com/gl-security/-/epics/109) for Q1.\n\nSo, what are the results so far?\n\n\u003Cdetails markdown=\"1\">\n\u003Csummary>\u003Cb>Priority 1 focus area: I believe there are good career opportunities at GitLab\u003C/b>\u003C/summary>\n\n* Implemented an [individual development plan](/handbook/security/individual-development-plan.html) so team members can continuously discuss career path and growth opportunities with their manager\n* Leadership exploration of additional career opportunities by mapping out additional role levels within the Security department\n\n\u003C/details>\n\n\u003Cdetails markdown=\"1\">\n\u003Csummary>\u003Cb>Priority 2: I have confidence in senior leaders and execs at GitLab \u003C/b>\u003C/summary>\n\n* Collaboration\n   * Established a [Security Department Team Day](https://gitlab.com/gitlab-com/gl-security/security-department-meta/-/issues/1133) to encourage collaboration and networking across the security organization\n   * Added a [Security OKR](/handbook/security/OKR.html) handbook page to encourage cross-functional OKRs\n* Diversity, Inclusion, and Belonging (DIB)\n   * Allyship training for Security department senior leadership team\n   * Planning for maturation 
of DIB specific metrics for the Security department\n* Transparency\n   * Updates to the [Security leadership job family](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/78910) handbook page to further define responsibilities by role\n   * Include Security department activities within the [Engineering week-in-review](/handbook/engineering/#communication)\n\n\u003C/details>\n\n\u003Cdetails markdown=\"1\">\n\u003Csummary>\u003Cb>Priority 3: I have access to the L&D I need to do my job well\u003C/b>\u003C/summary>\n\n* Dedicated handbook page to centralize all [Learning and Development opportunities](/handbook/security/learning-and-development.html) for Security team members\n* Process to enable team members to prioritize and [dedicate eight working hours per month to L&D](/handbook/security/learning-and-development.html#dedicate-time-to-ld)\n\n\u003C/details>\n\n## What's next\n\nEach set of culture committee members are nominated to serve a six-month term. We, the first set of committee members, have established some basic processes and hit the ground running on a few initiatives that we hope has laid some groundwork for future committee members and impacts how we live our values within the Security department and throughout GitLab. We've started onboarding the next set of peer-nominated Security Committee members, which includes [Liz Coleman](/company/team/#lcoleman), [Devin Harris](/company/team/#dsharris), [Andrew Kelly](/company/team/#ankelly), [Philippe Lafoucrière](/company/team/#plafoucriere), [Marley Riser](/company/team/#marleyr), and [Juliet Wanjohi](/company/team/#jwanjohi).\n\nSo, what should be prioritized and tackled first by this new committee? We know they will each come in with their own unique and valuable perspective and ideas on how to ensure our GitLab values are strengthened as we scale and represented in the work on the Security team and beyond. 
We look forward to continuing to contribute to this work on behalf of all of our team members and will keep you posted!\n\nHave some feedback on the initiatives we've worked on as part of our Security Culture Committee? Or suggestions based on what's worked within your organization? Let us know in the comments!\n",[720,9],{"slug":3717,"featured":6,"template":680},"how-the-security-culture-committee-is-strengthening-gitlab-values","content:en-us:blog:how-the-security-culture-committee-is-strengthening-gitlab-values.yml","How The Security Culture Committee Is Strengthening Gitlab Values","en-us/blog/how-the-security-culture-committee-is-strengthening-gitlab-values.yml","en-us/blog/how-the-security-culture-committee-is-strengthening-gitlab-values",{"_path":3723,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3724,"content":3730,"config":3735,"_id":3737,"_type":14,"title":3738,"_source":16,"_file":3739,"_stem":3740,"_extension":19},"/en-us/blog/how-to-become-more-productive-with-gitlab-ci",{"title":3725,"description":3726,"ogTitle":3725,"ogDescription":3726,"noIndex":6,"ogImage":3727,"ogUrl":3728,"ogSiteName":667,"ogType":668,"canonicalUrls":3728,"schema":3729},"How to become more productive with Gitlab CI","Explore some CI/CD strategies that can make your team more efficient and productive.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667358/Blog/Hero%20Images/gitlab-productivity.jpg","https://about.gitlab.com/blog/how-to-become-more-productive-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to become more productive with Gitlab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2021-06-21\",\n      }",{"title":3725,"description":3726,"authors":3731,"heroImage":3727,"date":3732,"body":3733,"category":743,"tags":3734},[1755],"2021-06-21","\nCI/CD pipelines are the preeminent 
solution to mitigate potential risks while integrating code changes into the repository. CI/CD pipelines help isolate the impact of potential errors, making it easier to fix them. Top that with a tool that provides effective visibility into the running tasks and there you have a recipe for success.\n\nSince the primary purpose of CI/CD pipelines is to speed up the development process and provide value to the end user faster, there's always room to make the process more efficient. This blog post unpacks some strategies that can help you get the most out of your pipeline definition in [GitLab CI](/features/continuous-integration/).\n\n## How Directed Acyclic Graphs (DAG) enable concurrent pipelines\n\n![By using Needs keyword you can define dependencies for jobs that need to be used from previous stages.](https://about.gitlab.com/images/blogimages/dag-explained.jpeg)\nBy using the \"Needs\" keyword you can define dependencies for jobs that need to be used from previous stages.\n{: .note.text-center}\n\nIn a [basic-pipeline](https://docs.gitlab.com/ee/ci/pipelines/pipeline_architectures.html#basic-pipelines) structure, all the jobs in a particular stage run concurrently and the jobs in the subsequent stage have to wait on those to finish to get started. This continues for all the stages.\n\nIn the image above, the first job in the second stage only depends on the first two job in the first stage to get started. But with the basic pipeline order in place, it has to wait for all three jobs in the first stage to complete before it can start executing, which slows down the overall pipeline considerably. However, by using `needs:` keywords, you can define a direct dependency for the jobs and they would only have to wait on the job they depend on to get started. 
By using the [DAG strategy](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/), you could shed out a few minutes from the processes for a certain project, thereby increasing the pipeline execution speed and bringing down the CI minutes consumption.\n\nBy using `needs: []` you can make the job in any stage run immediately, as it doesn't have to wait on any other job to finish.\n\n## Why parallel jobs increase productivity\n\nNot all the jobs in a pipeline have an equal run-time. While some may take just a few seconds, some take much longer to finish. When there are many team members waiting on a running pipeline to finish to be able to make a contribution to the project, the productivity of the team takes a hit.\n\nGitLab provides a method to make clones of a job and run them in parallel for faster execution using the `parallel:` keyword. While [parallel jobs](https://docs.gitlab.com/ee/ci/yaml/#parallel) may not help in reducing the consumption of [CI minutes](/pricing/faq-compute-credit/), they definitely help increase work productivity.\n\n## Break down big pipelines with parallel matrix Jobs\n\nBefore the release of [parallel matrix jobs](https://docs.gitlab.com/ee/ci/yaml/#parallel-matrix-jobs), in order to run multiple instances of a job with different variable values, the jobs had to be manually defined in the `.gitlab-ci-yml` like this:\n\n```yaml\n.run-test:\n  script: run-test $PLATFORM\n  stage: test\n\ntest-win:\n  extends: .run-test\n  variables:\n    - PLATFORM: windows\ntest-mac:\n  extends: .run-test\n  variables:\n    - PLATFORM: mac\ntest-linux:\n  extends: .run-test\n  variables:\n    - PLATFORM: linux\n```\n\nParallel matrix jobs were released with GitLab 13.3 and allow you to create jobs at runtime based on specified variables. 
Let's say there is a need to run multiple instances of a job with different variable values for each instance — with a combination of `parallel:` and `matrix:` you accomplish just that.\n\n```yaml\ntest:\n  stage: test\n  script: run-test $PLATFORM\n  parallel:\n    matrix:\n      - PLATFORM: [windows, mac, linux]\n```\n\nBy using `parallel:` and `matrix:`, big pipelines can be broken down into manageable parts for efficient maintenance.\n\n## Reduce the risk of merge conflicts with parent/child pipelines\n\n![Parent-child pipelines can include external YAML files in your configuration](https://about.gitlab.com/images/blogimages/parent-child-explained.jpeg)\nThe parent pipeline generates a child pipeline via the trigger:include keywords.\n{: .note.text-center}\n\nFor better management of dependencies, many organizations prefer a mono-repo setup for their projects. But mono-repos have a flip side too. If a repository hosts a large number of projects and a single pipeline definition is used to trigger different automated processes for different components, the pipeline performance is negatively affected. By using [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) you can design more efficient pipelines, since you can have multiple child-pipelines that run in parallel. The keyword `include:` is used to include external YAML files in your CI/CD configuration for this purpose. In the image above a pipeline (the parent) generates a child pipeline via the trigger:include keywords.\n\nThis approach also reduces the chances of merge conflicts from happening, as it allows you to only edit a section of the pipeline if necessary.\n\n## Merge trains help the target branch stay stable\n\nWhen there's a lot of merge requests flowing into a project, there is a risk of merge conflicts. 
[Merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) is a powerful feature by GitLab that allows users to automatically merge a series of (queued) merge requests without breaking the target branch. Using this feature, you can add an MR to the train, and it would take care of it until it is merged.\n\n## Use multiple caches in the same job\n\nStarting 13.11, GitLab CI/CD provides the ability to [configure multiple cache keys in a single job](/releases/2021/04/22/gitlab-13-11-released/#use-multiple-caches-in-the-same-job) which will help you increase your pipeline performance. This functionality could help you save precious development time when the jobs are running.\n\n## How can an efficient pipeline save you money?\n\nBy using CI/CD strategies that ensure safe merging of new changes and a green master, organizations can worry less about unanticipated downtimes caused by infrastructural failures and code conflicts.\n\nWith faster pipelines, developers end up spending lesser time in maintenance and find time and space to bring in more thoughtfulness and creativity in their work, leading to improvements in code quality and the company atmosphere and morale.\n\nIf you are looking to bring down the cost of running your CI/CD pipelines for a large project, look up the [Artifact and cache settings](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#artifact-and-cache-settings) and [Optimizing GitLab for large repositories](https://docs.gitlab.com/ee/ci/large_repositories/) sections in the documentation.\n",[1090,1293,677,9],{"slug":3736,"featured":6,"template":680},"how-to-become-more-productive-with-gitlab-ci","content:en-us:blog:how-to-become-more-productive-with-gitlab-ci.yml","How To Become More Productive With Gitlab 
Ci","en-us/blog/how-to-become-more-productive-with-gitlab-ci.yml","en-us/blog/how-to-become-more-productive-with-gitlab-ci",{"_path":3742,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3743,"content":3748,"config":3753,"_id":3755,"_type":14,"title":3756,"_source":16,"_file":3757,"_stem":3758,"_extension":19},"/en-us/blog/how-to-fuzz-go",{"title":3744,"description":3745,"ogTitle":3744,"ogDescription":3745,"noIndex":6,"ogImage":690,"ogUrl":3746,"ogSiteName":667,"ogType":668,"canonicalUrls":3746,"schema":3747},"How to fuzz Go code with go-fuzz continuously","Learn how (and why!) to fuzz Go code","https://about.gitlab.com/blog/how-to-fuzz-go","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to fuzz Go code with go-fuzz continuously\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yevgeny Pats\"}],\n        \"datePublished\": \"2020-12-03\",\n      }",{"title":3744,"description":3745,"authors":3749,"heroImage":690,"date":3750,"body":3751,"category":698,"tags":3752},[2233],"2020-12-03","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n## What is fuzzing?\n\nFuzzing or fuzz testing is an automated software technique that involves providing semi-random data as\ninput to the test program in order to uncover bugs and crashes.\n\n## Why fuzz Go Code?\n\n[Golang](https://golang.org/) is a safe language and memory corruption issues are a thing of the past so we don’t need to fuzz our code,\nright? Wrong 😃. 
Any code, and especially code where stability, quality, and coverage are important, is worth fuzzing.\nFuzzing can uncover logical bugs and denial-of-service  in critical components can lead to security issues as well.\n\nAs a reference to almost infinite amount of bugs found with go-fuzz (only the documented one) you can look [here](https://github.com/dvyukov/go-fuzz#trophies)\n\n## Enter go-fuzz\n\n[go-fuzz](https://github.com/dvyukov/go-fuzz) is the current de-facto standard fuzzer for go and was initially developed by [Dmitry Vyukov](https://twitter.com/dvyukov).\nIt is a coverage guided fuzzer which means it uses coverage instrumentation and feedback to generate test-cases which proved to be very successful both by go-fuzz and originally by fuzzers like AFL.\n\ngo-fuzz algorithm and in general coverage guided fuzzers works as follows:\n\n```\n// pseudo code\nInstrument program for code coverage\nfor {\n  Choose random input from corpus\n  Mutate input\n  Execute input and collect coverage\n  If new coverage/paths are hit add it to corpus (corpus - directory with test-cases)\n}\n```\n\n## Building & Running\nIf you are already familiar with this part you can skip to \"Running go-fuzz from GitLab-CI\" section.\nwe will use [go-fuzzing-example](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example) as a simple example.\nFor the sake of the example we have a simple [function](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/-/blob/master/parse_complex.go) with an off-by-one bug:\n\n```go\npackage parser\n\nfunc ParseComplex(data [] byte) bool {\n\tif len(data) == 5 {\n\t\tif data[0] == 'F' && data[1] == 'U' && data[2] == 'Z' && data[3] == 'Z' && data[4] == 'I' && data[5] == 'T' {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n```\n\nOur fuzz [function](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/-/blob/master/parse_complex_fuzz.go) will 
look like this and will be called by go-fuzz in a infinite loop with the generated data according to the coverage-guided algorithm\n\n```\n// +build gofuzz\n\npackage parser\n\nfunc Fuzz(data []byte) int {\n\tParseComplex(data)\n\treturn 0\n}\n```\n\nTo run the fuzzer we need to build an instrumented version of the code together with the fuzz function.\nThis is done with the following simple steps:\n\n```\ndocker run -it golang /bin/bash\n\n# Download this example\ngo get gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example\ncd /go/src/gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example\n\n# download go-fuzz and clang (libfuzzer)\ngo get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build\napt update && apt install -y clang\n\n# building instrumented version of the code together with libFuzzer integration\ngo-fuzz-build -libfuzzer -o parse-complex.a .\nclang -fsanitize=fuzzer parse-complex.a -o parse-complex\n\n./parse-complex\n\n#490479 NEW    ft: 11 corp: 7/37b lim: 477 exec/s: 11962 rss: 25Mb L: 6/6 MS: 1 ChangeByte-\n#524288 pulse  ft: 11 corp: 7/37b lim: 509 exec/s: 11915 rss: 25Mb\n#1048576        pulse  ft: 11 corp: 7/37b lim: 1030 exec/s: 11915 rss: 25Mb\npanic: runtime error: index out of range [6] with length 6\n\ngoroutine 17 [running, locked to thread]:\ngitlab.com/fuzzing-examples/example-go.ParseComplex.func6(...)\n        /go/src/gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/parse_complex.go:5\ngitlab.com/fuzzing-examples/example-go.ParseComplex(0x36f1cd0, 0x6, 0x6, 0x7ffeaa0d1f80)\n        /go/src/gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/parse_complex.go:5 +0x1b8\ngitlab.com/fuzzing-examples/example-go.Fuzz(...)\n        /go/src/gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/parse_complex_fuzz.go:6\nmain.LLVMFuzzerTestOneInput(0x36f1cd0, 0x6, 0x18)\n      
  gitlab.com/fuzzing-examples/example-go/go.fuzz.main/main.go:35 +0x85\nmain._cgoexpwrap_98ba7f745c88_LLVMFuzzerTestOneInput(0x36f1cd0, 0x6, 0x5a4d80)\n        _cgo_gotypes.go:64 +0x37\n==1664== ERROR: libFuzzer: deadly signal\n    #0 0x450ddf in __sanitizer_print_stack_trace (/go/src/gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/parse-complex+0x450ddf)\n    #1 0x430f4b in fuzzer::PrintStackTrace() (/go/src/gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/parse-complex+0x430f4b)\n    #2 0x414b7b in fuzzer::Fuzzer::CrashCallback() (/go/src/gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/parse-complex+0x414b7b)\n    #3 0x414b3f in fuzzer::Fuzzer::StaticCrashSignalCallback() (/go/src/gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/go-fuzzing-example/parse-complex+0x414b3f)\n    #4 0x7f57c561d72f  (/lib/x86_64-linux-gnu/libpthread.so.0+0x1272f)\n    #5 0x4b3a00 in runtime.raise runtime/sys_linux_amd64.s:164\n\nNOTE: libFuzzer has rudimentary signal handlers.\n      Combine libFuzzer with AddressSanitizer or similar for better crash reports.\nSUMMARY: libFuzzer: deadly signal\nMS: 1 ChangeByte-; base unit: eef4acc7500228bd0f65760be21896f230e0e39f\n0x46,0x55,0x5a,0x5a,0x49,0x4e,\nFUZZIN\nartifact_prefix='./'; Test unit written to ./crash-14b5f09dd74fe15430d803af773ba09a0524670d\nBase64: RlVaWklO\n```\n\nThis finds the bug in a few seconds, prints the “FUZZI” string that triggers the vulnerability, and saves the crash to a file.\n\n## Running go-fuzz from Gitlab-CI\nThe best way to integrate go-fuzz fuzzing with Gitlab CI/CD is by adding additional stage & step to your `.gitlab-ci.yml`.\nIt is straightforward and [fully documented](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#configuration).\n\n```\ninclude:\n  - template: Coverage-Fuzzing.gitlab-ci.yml\n\nfuzz_test_parse_complex:\n    extends: .fuzz_base\n    image: golang\n    
script:\n        - go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build\n        - apt update && apt install -y clang\n        - go-fuzz-build -libfuzzer -o parse-complex.a .\n        - clang -fsanitize=fuzzer parse-complex.a -o parse-complex\n        - ./gl-fuzz run --regression=$REGRESSION -- ./parse-complex\n\n```\n\nFor each fuzz target you will have to create a step which extends the `.fuzz_base` template that runs the following:\n\n- Builds the fuzz target.\n- Runs the fuzz target via gl-fuzz CLI.\n- For `$CI_DEFAULT_BRANCH` (can be overridden by `$COV_FUZZING_BRANCH`) will run fully fledged fuzzing sessions.\nFor everything else including MRs will run fuzzing regression with the accumulated corpus and fixed crashes.\n\nThis will run your fuzz tests in a blocking manner inside your pipeline. There is also a possibility to run longer fuzz sessions asynchronously described in the [docs](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#continuous-fuzzing-long-running-async-fuzzing-jobs)\n\nCheck out our [full documentation](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) and the [example repo](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example)\nand try adding fuzz testing to your own repos!\n",[9,720,722],{"slug":3754,"featured":6,"template":680},"how-to-fuzz-go","content:en-us:blog:how-to-fuzz-go.yml","How To Fuzz Go","en-us/blog/how-to-fuzz-go.yml","en-us/blog/how-to-fuzz-go",{"_path":3760,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3761,"content":3767,"config":3773,"_id":3775,"_type":14,"title":3776,"_source":16,"_file":3777,"_stem":3778,"_extension":19},"/en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler",{"title":3762,"description":3763,"ogTitle":3762,"ogDescription":3763,"noIndex":6,"ogImage":3764,"ogUrl":3765,"ogSiteName":667,"ogType":668,"canonicalUrls":3765,"schema":3766},"How to protect your source code 
with GitLab and Jscrambler","Learn how to seamlessly protect your source code at build time in just a few steps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669529/Blog/Hero%20Images/gitlab-jscrambler-blog-post-protecting-source-code.png","https://about.gitlab.com/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to protect your source code with GitLab and Jscrambler\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pedro Fortuna\"},{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2021-06-09\",\n      }",{"title":3762,"description":3763,"authors":3768,"heroImage":3764,"date":3770,"body":3771,"category":743,"tags":3772},[3769,1337],"Pedro Fortuna","2021-06-09","\nDevelopment teams are building, testing, and shipping code faster than ever before. Today, we know that security has a role to play at the early stages of the [DevOps workflow](/topics/devops/), but these security controls are mostly centered around finding and fixing bugs and vulnerabilities during development.\n\nIn this tutorial, we will explore the importance of protecting client-side application code at runtime and guide you through implementing it in your GitLab instance using the integration with [Jscrambler](https://jscrambler.com/).\n\n## The importance of runtime code protection\n\nWith web and mobile applications dealing with increasingly sensitive data, addressing the application's attack surface requires considering additional threats that are not directly linked to vulnerabilities.\n\nThis concern has been widely covered in NIST, ISO 27001, and some of the latest iterations of OWASP guides, such as the [Mobile Application Security Verification Standard](https://mobile-security.gitbook.io/masvs/). 
These information security standards highlight that attackers who gain unwarranted access to the application's source code may be able to retrieve proprietary code, find ways to bypass app restrictions, and make more progress while planning/automating data exfiltration attacks.\n\nAs such, it's important that companies implement an additional security layer (on top of application security best practices) to tackle the threats of tampering and reverse engineering of an application's source code.\n\n## Getting started with Jscrambler + GitLab\n\nA robust code protection approach must include multiple layers to raise the bar for reverse-engineering and tampering attempts. Jscrambler achieves this by using a combination of code protection techniques, including obfuscation, code locks, runtime protection, and threat monitoring.\n\nLet's see how you can easily set up this layered source code protection using Jscrambler in your GitLab instance.\n\n### What you need for the Jscrambler integration\n\nTo use this integration with Jscrambler, make sure that you meet the following prerequisites:\n\n* A JavaScript-based project, as Jscrambler can protect JavaScript-based web and hybrid mobile apps\n* A [Jscrambler account](https://jscrambler.com/signup)\n* A GitLab instance where the Jscrambler integration will run\n\n### How to configure Jscrambler\n\nThe first step of this integration is to define the Jscrambler code protection techniques you want to use. The best way to do this is through the [Jscrambler web app](https://app.jscrambler.com/). You can either select one of the pre-defined templates or pick techniques one by one. Review [the Jscrambler guide](https://blog.jscrambler.com/jscrambler-101-first-use/) for further instructions on choosing Jscrambler techniques. 
No matter what you choose, download Jscrambler's JSON configuration file by clicking the download button next to the Application Settings, as shown below.\n\n![Jscrambler_download_JSON](https://about.gitlab.com/images/blogimages/jscrambler-app-download-json.gif \"How to download Jscrambler's JSON config.\")\nHow to download Jscrambler's JSON config.\n{: .note.text-center}\n\nPlace the file you just downloaded in your project's root folder and rename it to `.jscramblerrc`. Now, open the file and make sure you remove the access and secret keys from this configuration file by removing the following lines.\n\n```json\n \"keys\": {\n   \"accessKey\": \"***********************\",\n   \"secretKey\": \"***********************\"\n },\n```\n\nThis will prevent having hardcoded API keys, which could pose security issues. You should store these API keys using the [GitLab CI environment variables](https://docs.gitlab.com/ee/ci/variables/), as shown below.\n\n![Jscrambler API keys as GitLab environment variables](https://docs.jscrambler.com/637a78d94e016c8be1866edb0627f2bc.png)\nWhere to score Jscrambler's API keys in GitLab.\n{: .note.text-center}\n\nAnd that's all you need from Jscrambler's side!\n\n### Configuring a Jscrambler job inside GitLab CI\n\nStart by checking you have placed the `.gitlab-ci.yml` file at the root of your project. Inside this file, you will need to define your `build` stage, as well as add a new `protect` stage, as shown below.\n\n```yml\nstages:\n - build\n - protect\n # - deploy\n # ...\n```\n\nThe `build` stage should be configured as follows:\n\n```yml\nbuild:production:\n stage: build\n artifacts:\n   when: on_success\n   paths:\n     - build\n script:\n   - npm i\n   - npm run build\n```\n\nThis configuration will run the `npm run build` command, which is a standard way of building your app to production, placing the resulting production files in the `/build` folder. 
Plus, it ensures that the `/build` folder becomes available as a [GitLab CI artifact](https://docs.gitlab.com/ee/ci/pipelines/job_artifacts.html) so that it can be used later in other jobs.\n\nHere, make sure that you set the build commands and build folder according to your own project, as these may vary.\n\nNext, configure the `protect` stage as shown below:\n\n```yml\nbuild:production:obfuscated:\n stage: protect\n before_script:\n   - npm i -g jscrambler\n dependencies:\n   - build:production\n artifacts:\n   name: \"$CI_JOB_NAME\"\n   when: on_success\n   paths:\n     - build\n   expire_in: 1 week\n script:\n   # By default, all artifacts from previous stages are passed to each job.\n   - jscrambler -a $JSCRAMBLER_ACCESS_KEY -s $JSCRAMBLER_SECRET_KEY -o ./ build/**/*.*\n```\n\nThis stage starts by installing the Jscrambler npm package globally. Next, it is configured to execute Jscrambler at the end of each new production build process. Typically, you will want to ensure that Jscrambler is the last stage of your build process, because Jscrambler transforms the source code extensively and can also add [anti-tampering protections](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-defending). This means changing the files after they have been protected by Jscrambler may break the app functionality.\n\nThis `protect` stage is configured to access the Jscrambler API keys that have been loaded as GitLab environment variables. Finally, the output of the protection is placed into the same `/build` folder and made available as a GitLab CI artifact for posterior use (e.g., a deploy job).\n\nNote that while this example shows how to use the Jscrambler CLI client to protect the code, Jscrambler is compatible with [other clients](https://docs.jscrambler.com/code-integrity/documentation/api/clients), such as Grunt, Gulp, webpack, Ember, and Metro (React Native).\n\nAnd, that's all there is to it! 
You can configure your `deploy` stage as usual, which should access the contents of the `build/` folder and ensure your protected files are available in a live production environment.\n\n### Checking the protection result\n\nAs a final (optional) step, you might want to check the live app and see what its source code looks like. You can do that easily by using a browser debugger and opening the files from the \"Sources\" tab. The protected code should look completely unintelligible, similar to the one shown below.\n\n![Source code protected by Jscrambler](https://i.imgur.com/HXLZyFh.png)\nExample of murky source code protected by Jscrambler.\n{: .note.text-center}\n\nJust bear in mind that, in case you are using Jscrambler's anti-debugging transformations, your browser debugger will likely crash or derail the app execution. This is intended behavior, which is very useful to prevent reverse-engineering of the code.\n\n## Final thoughts\n\nAs we saw in this tutorial, setting up this integration between Jscrambler and GitLab is very straightforward. It introduces a new `protect` stage where the JavaScript source code is protected by Jscrambler before deployment.\n\nJscrambler goes well beyond JavaScript obfuscation since it provides runtime protection techniques such as [self defending](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-defending) and [self healing](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-healing), which provide anti-tampering and anti-debugging capabilities, as well as [code locks](https://docs.jscrambler.com/code-integrity/documentation/client-side-countermeasures). For more details about Jscrambler transformations, review [Jscrambler's documentation page](https://docs.jscrambler.com/).\n\n## Watch the demo\n\nMore of a video person? 
Watch the demo on how to protect your source code using GitLab and Jscrambler.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/aBx2Vtbe-1w\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[1440,720,9],{"slug":3774,"featured":6,"template":680},"how-to-protect-your-source-code-with-gitlab-and-jscrambler","content:en-us:blog:how-to-protect-your-source-code-with-gitlab-and-jscrambler.yml","How To Protect Your Source Code With Gitlab And Jscrambler","en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler.yml","en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler",{"_path":3780,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3781,"content":3787,"config":3792,"_id":3794,"_type":14,"title":3795,"_source":16,"_file":3796,"_stem":3797,"_extension":19},"/en-us/blog/how-to-push-code-from-a-hammock",{"title":3782,"description":3783,"ogTitle":3782,"ogDescription":3783,"noIndex":6,"ogImage":3784,"ogUrl":3785,"ogSiteName":667,"ogType":668,"canonicalUrls":3785,"schema":3786},"How to push code from a hammock","Our remote work dream team balances globetrotting with career advancement at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678958/Blog/Hero%20Images/hammock.jpg","https://about.gitlab.com/blog/how-to-push-code-from-a-hammock","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to push code from a hammock\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-09-23\",\n      }",{"title":3782,"description":3783,"authors":3788,"heroImage":3784,"date":3789,"body":3790,"category":808,"tags":3791},[672],"2019-09-23","\n_At GitLab, our team doesn’t wake up at the same time and commute the same routes to sit in the same office. 
In fact, some of our team members don’t have an office at all! As a globally distributed company with an all-remote workforce, we have an exceptionally diverse set of team members dispersed on multiple continents. In this series, we explore how GitLab team members use the autonomy our company affords them to create workspaces that suit their lifestyle and cater to their hierarchy of needs: Whether that involves creating a cozy home office space, or diving into the unknown by working while traveling. New here? Go back to [read part one](/blog/not-everyone-has-a-home-office/) of our remote work series._\n\nFor many career-minded individuals, the desire to travel for prolonged periods comes at a cost. Sometimes, the only options are to plan for rushed two-week bursts of vacation, wait for retirement, or leave your career behind.\n\nBack in 2013, I had the opportunity to volunteer at an NGO in Hanoi for a month, but in order to do so, I had to make a choice: Leave my job as a reporter, or pass on the opportunity to work in Vietnam. My reporting job only allotted employees two weeks of annual paid time off (PTO) and didn’t allow for remote work at all. After some (but not much) deliberation, I left my job and went to Hanoi.\n\nEven in some companies where working remotely is an option, if all-remote isn’t part of the company culture, traveling abroad isn’t always feasible. [Erich Wegscheider](/company/team/#ewegscheider), talent operations specialist at GitLab, knows firsthand that [not all remote is created equal](/blog/not-all-remote-is-created-equal/).\n\nErich went to Europe for a few weeks while working remotely at a previous job with big plans to travel and work. But because he was tethered to working Pacific Time hours, the logistics made setting up an effective workday a challenge.\n\n“Sure, I was ‘remote,’ but the reality was that I worked in the equivalent of a satellite office by myself,” says Erich in a previous post. 
“Another detractor to working remotely was that it wasn’t conducive to my career development. Given that my colleagues worked at the office in California, the opportunity to lead or manage a team wasn’t presented, given my desire for location independence.”\n\nToday, Erich is able to balance career advancement with wanderlust. He’s currently delivering results for GitLab while lounging beachside from Bali before heading out to a new location as part of the adventure of a lifetime with [WiFi Tribe](https://wifitribe.co/).\n\nErich’s experience of working from Bali is hardly an anomaly at an all-remote company like GitLab. In fact, if GitLab somehow had geolocation enabled on our [contribute graphs](/blog/how-do-you-contribute/), you’d find code pushed from vans, hammocks, and likely some ancient ruins too. Where WiFi is enabled, GitLab is there.\n\n## “Where are you calling from now?”\n\nCaroline, people experience associate at GitLab, turned heads during our daily breakout call earlier this month by dispatching from [Chiostro del Bramante](https://www.chiostrodelbramante.it/), a stunning art museum in Rome.\n\n![Breakout call at Chiostro del Bramante](https://about.gitlab.com/images/blogimages/allremote-travel/breakoutcall.jpg){: .shadow.medium.center}\nCaroline joins our breakout call from the Chiostro del Bramante art museum in Rome.\n{: .note.text-center}\n\n“There is a coffee bar on the first floor with an outdoor sitting area overlooking the atrium,” says Caroline. “Hands down the best suggestion I have gotten from [workfrom.co](https://workfrom.co/) which is my number one go-to place to find ‘anything with good WiFi’ to work from when I land in a new city.”\n\nIt’s good to have these resources, because Caroline finds herself working from a new city often. 
Her visit to Chiostro del Bramante was right in the middle of her two-month long tour of Europe.\n\n“I started from Berlin, Germany, and have traveled to Prague, Czechoslovakia; Vienna, Austria; Budapest, Hungary; Zagreb, Serbia; Venice, Milan, Florence, and Rome in Italy; Barcelona and Madrid, Spain, and now Lisbon, Portugal” says Caroline. “From here I intend to proceed on Paris, Lyon, and Marseilles in France; Brussels, Belgium; Amsterdam, Netherlands; Geneva, Switzerland; Greece and Santorini, and finally Qatar before I return back home.”\n\n“I am a nature-holic and I always try to find the hidden parks and waterfalls, even in big cities,” says Caroline. “But this has been a big city tour because I am a village girl and curiosity won't let me. I plan to do another rural places trip next year in most of these countries.”\n\nCaroline lives in Nairobi, Kenya on a small half urban, half rural community called [Kinoo](https://www.google.com/maps/place/Kinoo,+Kikuyu,+Kenya/@-1.2520949,36.6834461,15z/data=!3m1!4b1!4m5!3m4!1s0x182f1ed5ba8b4527:0x1c9818f290cb069!8m2!3d-1.2526258!4d36.6930253) “with lots of tea leaves and sheep.”\n\n[Mike Miranda](/company/team/#mikemiranda), SMB professional advocate at GitLab, lives roughly 9,600 miles from Kinoo in Los Angeles, CA, but like Caroline, he has a full passport from his time working at GitLab.\n\nMike spent about half of 2019 globetrotting, and traveled quite a bit in 2018 as well, visiting: Amsterdam, Netherlands; Sofia, Bulgaria; Kyiv, Ukraine; Izmail, Ukraine; London, England; Budapest, Hungary; Dublin, Ireland; Lisbon and Porto in Portugal; Krakow, Poland; Barcelona and Madrid, Spain; Tel Aviv, Israel; Jerusalem, Israel; returning to Spain in Pamplona; Belgrade, Serbia; Berlin, Germany; Paris, France; Florence, Italy; circling back to Germany in Cologne; Burgas, Bulgaria, and even more cities across the United States, all while working for GitLab full-time.\n\nUnlike Caroline, Mike tends to gravitate more toward urban 
environments while traveling, but also loves visiting some rural locations as well: “I prefer the hidden gems though I definitely spent plenty of time in classic tourist cities.”\n\n## GitLab wants you to travel and visit team members\n\nGitLab’s all-remote set-up introduces a world of possibilities to our team members, many of whom saw their travel opportunities restricted in the past by colocated workspaces. So, when the opportunities are endless, which direction do you head?\n\nWhy not take a page out of [Douwe Maan](/company/team/#DouweM) and [Robert Speicher](/company/team/#rspeicher)’s playbook, and visit your colleagues with the help of GitLab. After all, GitLab has more than 888 team members across 57 countries and regions on five continents (these numbers are always growing!). The [GitLab visiting grant](/handbook/incentives/#visiting-grant) will help you pay for your travels, allotting $150 toward your transportation costs for each GitLab team member that you see on your journey. For example, if you have plans to travel to the San Francisco Bay Area to visit family and join a coworking day with six local GitLabbers, up to $900 in travel costs (6 x 150 = 900) will be reimbursed.\n\nThis program was inspired by Douwe and Robert, who spent six months of 2016 [traveling around the world](/blog/around-the-world-in-6-releases/), visiting 49 colleagues in 20 cities on all five continents. Their journey started in Robert’s home in Washington D.C., and ended in Douwe’s home in Amsterdam. This experience opened up a new perspective not just on how our colleagues live but how they worked as well.\n\n“While you hear about things going on in people's lives, about the places they live, and about issues they face, it's hard to truly appreciate and understand these different perspectives at a distance of hundreds, thousands, or tens of thousands of miles,” writes Douwe in a previous post. 
“Visiting them, getting to know them in their ‘natural habitat,’ and experiencing some bits of their life yourself, brings you closer to that understanding than anything else.”\n\nFor instance, [a day in the life of a GitLab team member](/blog/day-in-the-life-remote-worker/) based in the United States is different from an Asia-Pacific (APAC)-based team member, as time zones create major differences in when Slack is buzzing, when the company call is, etc.\n\n## The biggest challenges facing digital nomads\n\n[GitLab prioritizes written, asynchronous communication](/handbook/communication/#introduction), which is largely why all remote works so effectively for our company. But sometimes, you have to take the inconveniently timed meeting anyway. Caroline says being available for meetings across different time zones has been one of the biggest challenges with global travel.\n\n“The biggest part of what [being] a digital nomad involves is having the flexibility to determine your hours and find a perfect balance between work and discovering your current location,” says Caroline. “Meetings are usually fixed times that sometimes just mess up your entire preplanned flow. I have learned to be flexible. To interrupt an afternoon of site-seeing with a quick dash into a coffee shop for a quick meeting or to plan my day around a meeting.”\n\nErich is in Bali, so he’s currently in the APAC time zone. This means his evenings typically conclude with a few work-related calls and meetings.\n\n![The Mocca in Canggu, Bali](https://about.gitlab.com/images/blogimages/allremote-travel/the-mocca-canggu.jpg){: .shadow.small.center}\nThe Mocca is one of the cafes Erich works at during his time in Canggu, Bali.\n{: .note.text-center}\n\n“Another fun part about being on APAC time, when the Americas is the norm, is that weekends are shifted,” says Erich. “It's a 12-hour time difference to Eastern Time and 15 to Pacific, so Monday is essentially my Sunday. 
That means I usually work Saturday morning, but that's been fine by me thus far! The schedule allows plenty of time to get out and explore. Best of all, Mondays are generally quiet travel-wise, so it's a great time to move around the island as well.”\n\nMike experienced everything from whitewater rafting in Sofia, Bulgaria to thermal bath parties in Budapest, Hungary during his time abroad, but life as a digital nomad isn’t one giant vacation.\n\n“Timezone was initially a challenge and that required being intentional about a schedule and sticking with it,” says Mike. “Also, it was difficult to get into a comfortable routine and sometimes taxing to constantly be living out of a suitcase.”\n\nFor Mike, establishing a routine became critical to staying centered in ever-changing environments: “I would identify a coworking space or understand if the WiFi would work well for calls, know where and how I would exercise, know my work hours given the timezone.”\n\nBut his most important advice? Enjoy the adventure.\n\n“While it's not a vacation, make sure you take your work hours seriously and outside of that time enjoy the city you're in and the people you're around – I can't overstate how important it is to unplug.”\n\nCover Photo by Trinity Treft on Unsplash.\n{: .note}\n",[832,9],{"slug":3793,"featured":6,"template":680},"how-to-push-code-from-a-hammock","content:en-us:blog:how-to-push-code-from-a-hammock.yml","How To Push Code From A Hammock","en-us/blog/how-to-push-code-from-a-hammock.yml","en-us/blog/how-to-push-code-from-a-hammock",{"_path":3799,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3800,"content":3806,"config":3812,"_id":3814,"_type":14,"title":3815,"_source":16,"_file":3816,"_stem":3817,"_extension":19},"/en-us/blog/how-we-added-eslint-into-vue",{"title":3801,"description":3802,"ogTitle":3801,"ogDescription":3802,"noIndex":6,"ogImage":3803,"ogUrl":3804,"ogSiteName":667,"ogType":668,"canonicalUrls":3804,"schema":3805},"How eslint-plugin-vue improved 
our code reviews","A few months ago we felt the need to build a style guide for Vue and now are using eslint-vue-plugin, which is saving us time in our code reviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680227/Blog/Hero%20Images/code_cover_image.jpg","https://about.gitlab.com/blog/how-we-added-eslint-into-vue","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How eslint-plugin-vue improved our code reviews\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Filipa Lacerda\"}],\n        \"datePublished\": \"2018-02-13\",\n      }",{"title":3801,"description":3802,"authors":3807,"heroImage":3803,"date":3809,"body":3810,"category":743,"tags":3811},[3808],"Filipa Lacerda","2018-02-13","\n\nWe've (finally) integrated [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue) successfully into our codebase!\n\n\u003C!-- more -->\n\nWhen we [added Vue](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5845) to our codebase back in April 2016, [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue) did not yet [exist](https://github.com/vuejs/eslint-plugin-vue/commit/6a3a6db540e823b51af1e02950896ac9c2b49219) and we had not yet started using [eslint](https://eslint.org/) at all.\n\nOne of the things I love the most about GitLab being an open source tool is that anyone can contribute! [Winnie Hellmann](https://gitlab.com/winh), who has since joined the team, did this amazing work [adding eslint](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5445) as a community contribution. Thanks Winnie! 
🙇‍\n\n## The start of a style guide\n\nAs our Vue codebase grew from a few features to quite a large usage ([issue boards](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5554), [environments](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8954), [cycle analytics](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/7366), [pipelines](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/10878)) we noticed that each of our Vue apps followed a different style. At that time we felt the need to [document how to architecture a Vue application](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8866) to ensure a consistent codebase. Once we defined and documented how to use the component system and Flux architecture [with our codebase](https://docs.gitlab.com/ee/development/fe_guide/vue.html#vue-architecture), we noticed that our Vue code also differed in very small things, such as indentation or the order we declared the methods. These inconsistencies, although small, increased the complexity of the review process and for maintaining a healthy codebase.\n\nWith the goal of decreasing the time we spent reviewing Vue code and debating on each of these aspects, and because at the time there wasn't an official Vue style guide, [we started our own](https://gitlab.com/gitlab-org/gitlab-ce/commit/8c3bdc853a5237a3bef6e26fcf22132db7e8bd9c)! You can check out our documentation [here](https://docs.gitlab.com/ee/development/fe_guide/style_guide_js.html#vue-js). As the Vue community grew, the need for an official style guide and for an eslint plugin for Vue grew with it. Thanks to the wonderful team [Michał Sajnóg](https://github.com/michalsnik), [Toru Nagashima](https://github.com/mysticatea), [Armano](https://github.com/armano2) and [Chris Fritz](https://github.com/chrisvfritz) leading the development of such a tool, we are now able to use it in production! 
And we even got to act as source of [inspiration for the official one](https://github.com/vuejs/eslint-plugin-vue/issues/77#issuecomment-315834845) ❤\n\n## Adding eslint-vue-plugin\n\nAfter [waiting a couple of months](https://gitlab.com/gitlab-org/gitlab-ce/issues/34312) for a stable version of [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue), we finally gave it a try once version [4.0.0](https://github.com/vuejs/eslint-plugin-vue/releases/tag/v4.0.0) was released.\n\n![EE Conflicts](https://about.gitlab.com/images/eslint-vue-plugin/eslint-conflicts-team-help.png \"EE Conflicts\"){: .shadow}\n\n*\u003Csmall>Frontend team working together to resolve all the vue eslint problems\u003C/small>*\n\nIt took a couple of days to fix all the problems eslint identified in our code, but we were able to successfully add it and thanks to a huge team effort, the second row of conflicts was solved very quickly. Thanks again Luke, Eric, Kushal and José!\n\nNow our review process is even faster, we don't have to manually check for the style guide rules anymore! 
🎉\n\n[Cover image](https://pixabay.com/en/computer-computer-code-screen-1209641/) by [Free-Photos](https://pixabay.com/en/users/Free-Photos-242387/) is licensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[9,3138],{"slug":3813,"featured":6,"template":680},"how-we-added-eslint-into-vue","content:en-us:blog:how-we-added-eslint-into-vue.yml","How We Added Eslint Into Vue","en-us/blog/how-we-added-eslint-into-vue.yml","en-us/blog/how-we-added-eslint-into-vue",{"_path":3819,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3820,"content":3826,"config":3833,"_id":3835,"_type":14,"title":3836,"_source":16,"_file":3837,"_stem":3838,"_extension":19},"/en-us/blog/how-we-apply-gitlab-values-to-our-bug-bounty-council-process",{"title":3821,"description":3822,"ogTitle":3821,"ogDescription":3822,"noIndex":6,"ogImage":3823,"ogUrl":3824,"ogSiteName":667,"ogType":668,"canonicalUrls":3824,"schema":3825},"Inside the Bug Bounty Council at GitLab","We improve consistency across severity ratings and payouts in our bug bounty program with collaboration, iteration, and async communication.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681956/Blog/Hero%20Images/gitlab-values-header.png","https://about.gitlab.com/blog/how-we-apply-gitlab-values-to-our-bug-bounty-council-process","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the Bug Bounty Council at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Kelly\"}],\n        \"datePublished\": \"2021-03-16\",\n      }",{"title":3821,"description":3822,"authors":3827,"heroImage":3823,"date":3829,"body":3830,"category":720,"tags":3831},[3828],"Andrew Kelly","2021-03-16","\n\nThe [Application Security (AppSec) team at GitLab](/handbook/security/security-engineering/application-security/) works closely with engineering and product teams to ensure the security of our products. 
There’s another group we also work with regularly to secure our product -- the amazing hackers who submit reports to us via [our bug bounty program](https://hackerone.com/gitlab). These talented individuals from around the world research and identify security vulnerabilities in GitLab and submit bug reports detailing their findings. GitLab’s AppSec team verifies and triages the findings and the reporters are rewarded a bounty for making our product stronger. \n\nBeyond the cold hard cash, we’re continually looking for ways to recognize and further engage the deep talent and expertise of the security researchers that contribute to our program.  We’ve started a new blog series, “Ask a Hacker” and just featured `@ajxchapman` in this [latest blog post](/blog/ajxchapman-ask-a-hacker/). We’ve also kicked off a series of public Ask Me Anything (AMA) sessions with hackers who contribute to our program and we’ve got one coming up with [Alex Chapman](https://hackerone.com/ajxchapman) on **March 22 at 15:30 UTC** ([see the world clock](https://www.timeanddate.com/worldclock/fixedtime.html?msg=GitLab+AMA+with+Bug+Bounty+Hunter%2C+Alex+Chapman&iso=20210322T0830&p1=224&am=25)) and we hope you’ll join us! \n\n**Get all of the details in [this Google Form](https://docs.google.com/forms/d/e/1FAIpQLSd_FFsK58KmUzYYIRU2P6BItjx1L9gnGrGY_RPz7_1pHTADAg/viewform), including how to get an invite.** \n\n![Ajxchapman AMA](https://about.gitlab.com/images/blogimages/ama-with-alexchapman-blog.png){: .large.center}\n\n## Achieving consistent severity and bounty assessments through collaboration\nWe strive to be open about as many things as possible and one of GitLab’s core values is [transparency](https://handbook.gitlab.com/handbook/values/#transparency). In bug bounty programs, we know there can be confusion around how severity levels and specific bounty awards are determined for a given report. 
So, we want to provide some insight into the GitLab Bug Bounty Council process and how we use it to ensure collaboration and consistency across our severity and bounty assessments.\n\n### The mechanics of the council\nWe try to [dogfood](/handbook/engineering/development/principles/#dogfooding) as much as possible, so our Bug Bounty Council process relies heavily on the use of an [issue tracker](https://docs.gitlab.com/ee/user/project/issues/) specifically set up for the AppSec team. Every week, a bot creates a new Bug Bounty Council issue, which serves as the source of truth for discussions and decisions made about any verified vulnerabilities that came in through HackerOne that week. [Asynchronous communication](/company/culture/all-remote/asynchronous/) is critical for bounty discussions since our AppSec team is distributed around the world. As of writing this post, we have team members spread across multiple time zones in 10 different countries.\n\nWhen a HackerOne report [gets triaged](/handbook/security/security-engineering/application-security/runbooks/hackerone-process.html#working-the-queue), an issue comment thread is created on the current week’s Bug Bounty Council issue. This comment thread is where any discussion about a specific report and/or bounty will happen and typically includes:\n- Link to the HackerOne report\n- Brief description of the finding\n- A recommendation for the bounty amount\n- References to similar issues and bounty amounts that were paid, if available\n- The [CVSSv3](https://en.wikipedia.org/wiki/Common_Vulnerability_Scoring_System) vector string for the vulnerability\n\nThe team member triaging the report can add any additional information, discussion items, or questions that they may have for the broader team, and the weekly council has become a great place for our AppSec engineers to solicit feedback from team members about the findings themselves. 
Other members of the AppSec team are then encouraged to share their feedback about the severity, consistency with other similar reports, or bounty amount.  In the case of bounty amounts, this number is ultimately determined once a particular suggestion has received at least two thumbs-up emoji (👍) from other AppSec team members.\n\n## Applying iteration to improve efficiency and accuracy\nWe’re always looking for ways to embrace [iteration](https://handbook.gitlab.com/handbook/values/#iteration) and improve our processes. Recently our amazing [security automation](/handbook/security/security-engineering/automation/) team configured things so that triaged reports are automatically added to the Bug Bounty Council issues, which saves our triagers time and ensures that every report gets discussed.\n\nAnother iteration implemented in the past few months is the addition of a requirement that each vulnerability get an approval on the CVSSv3 vector string in addition to the bounty amount. CVSS scores attempt to describe the characteristics of a vulnerability and include a numerical score that represents the severity. Each proposed CVSSv3 score is up for discussion and requires at least two bug emoji (🐛) from other AppSec team members. The goal here is to make our CVSSv3 vector strings as accurate as possible before a CVE is requested through GitLab’s [CVE Numbering Authority](/security/cve/).\n\n## Iterating towards increased transparency\nThe Bug Bounty Council is an internal process meant to increase collaboration on the decision making involved in severity and bounty determinations. And, through this function-wide collaboration and documented discussion, we can already see improvements in consistency across level-setting. Naturally, transparency around this process can be improved and that’s what we’re aiming to do. 
We’re exploring ways to further utilize CVSS in our process as well as incorporating a CVSS calculator around both severity and bounty determinations, bringing a whole new level of transparency to this process. We’re really looking forward to when we can implement and announce these changes and know it will be a welcome iteration by the bug bounty reporter community.\n\n## New features released, 22nd of each and every month\nOur bug bounty program is open (public since December 2018) and anyone can participate. If you’re interested in collaborating with us to make our platform more secure please feel free to submit a bug bounty report to us! This feels like a great time to remind first-time and veteran reporters, too, that we release new features on the 22nd of every month. You can learn more about [our release process](/releases/), see the [latest monthly release blog post](/releases/categories/releases/) and see what's coming in [future releases](/upcoming-releases/). Interested bug hunters may just find *something new* that piques their interest.😜\n",[720,3832,9],"bug bounty",{"slug":3834,"featured":6,"template":680},"how-we-apply-gitlab-values-to-our-bug-bounty-council-process","content:en-us:blog:how-we-apply-gitlab-values-to-our-bug-bounty-council-process.yml","How We Apply Gitlab Values To Our Bug Bounty Council Process","en-us/blog/how-we-apply-gitlab-values-to-our-bug-bounty-council-process.yml","en-us/blog/how-we-apply-gitlab-values-to-our-bug-bounty-council-process",{"_path":3840,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3841,"content":3847,"config":3853,"_id":3855,"_type":14,"title":3856,"_source":16,"_file":3857,"_stem":3858,"_extension":19},"/en-us/blog/how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo",{"title":3842,"description":3843,"ogTitle":3842,"ogDescription":3843,"noIndex":6,"ogImage":3844,"ogUrl":3845,"ogSiteName":667,"ogType":668,"canonicalUrls":3845,"schema":3846},"How we are closing the gap on replicating 
*everything* in GitLab Geo","Developing an internal framework to enable other teams to add Geo support for their features","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we are closing the gap on replicating *everything* in GitLab Geo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Kozono\"}],\n        \"datePublished\": \"2021-04-29\",\n      }",{"title":3842,"description":3843,"authors":3848,"heroImage":3844,"date":3850,"body":3851,"category":698,"tags":3852},[3849],"Michael Kozono","2021-04-29","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIn early 2020, it took 3.5 months of solid work to implement replication of a new data type in Geo. One year later, support can be added within a month -- including development and all required reviews. How did we do it? First, let me introduce you to Geo.\n\n## What is Geo?\n\n[GitLab Geo](https://about.gitlab.com/solutions/geo/) is the solution for widely distributed development teams and for providing a warm-standby as part of a disaster recovery strategy. Geo replicates your GitLab instance to one or more local, read-only instances.\n\n## What are data types?\n\n[GitLab Geo was released in June 2016 with GitLab 8.9](https://about.gitlab.com/releases/2016/06/22/gitlab-8-9-released/#gitlab-geo-new-product) with the ability to replicate project repositories to a read-only secondary GitLab site. Developers located near secondary sites could fetch project repositories as quickly as if they were near the primary.\n\nBut what about wiki repositories? What about LFS objects or CI job artifacts? In GitLab, each of these things is represented by different Ruby classes, database tables, and storage configurations. 
In Geo, we call these data types.\n\n## Is it really that hard to copy data?\n\nWhen we say a new data type is supported by Geo, this is what we mean:\n\n* Backfill existing data to Geo secondary sites\n* As fast as possible, replicate new or updated data to Geo secondary sites\n* As fast as possible, replicate deletions to Geo secondary sites\n* Retry replication if it fails, for example due to a transient network failure\n* Eventually recover missing or inconsistent data, for example if Sidekiq jobs are lost, or if infrastructure fails\n* Exclude data according to [selective sync settings](https://docs.gitlab.com/ee/administration/geo/replication/configuration.html#selective-synchronization) on each Geo secondary site\n* Exclude remote stored data unless [Allow this secondary node to replicate content on Object Storage](https://docs.gitlab.com/ee/administration/geo/replication/object_storage.html#enabling-gitlab-managed-object-storage-replication) is enabled on a Geo secondary site\n* Verify data integrity against the primary data, after replication\n* Re-verify data integrity at regular intervals\n* Report metrics to Prometheus\n* Report metrics in the Admin UI\n* View replication and verification status of any individual record in the Admin UI\n* Replication and verification job concurrency is configurable in Admin UI\n* Retry replication if data mismatch is detected ([coming soon to all data types using the framework](https://gitlab.com/gitlab-org/gitlab/-/issues/301244))\n* Allow manual re-replication and re-verification in the Admin UI ([coming soon to all data types using the framework](https://gitlab.com/gitlab-org/gitlab/-/issues/216100))\n* And more\n\n## How to iterate yourself into a problem\n\n[Iteration is a core value](https://handbook.gitlab.com/handbook/values/#iteration) at GitLab. 
In the case of Geo, by [GitLab 12.3](https://about.gitlab.com/releases/2019/09/22/gitlab-12-3-released/#geo-natively-supports-docker-registry-replication) we had added replication support for the most important data types, for example:\n\n* Project Git repositories\n* Project wiki Git repositories\n* Issue/MR/Epic attachments\n* LFS objects\n* CI job artifacts\n* Container/Docker registry\n\nAnd we had added a slew of features around these data types. But suddenly it was clear we had a problem. **We were falling behind in the race to replicate and verify all of GitLab's data.**\n\n* A new data type was being added by other teams, every few months. It was painful to prioritize 3 months of development time only to add replication to one data type. And even if we caught up, the latest features would always be unsupported by Geo for 3 months.\n* Automatic verification of Project and Wiki repositories was implemented, but adding it to a single data type was going to take 3 months.\n* Maintenance and other new features were increasing in effort due to the amount of code duplication.\n* Our event architecture needed too much boilerplate and overhead to add new events\n\n## How to iterate yourself out of a problem\n\nJust because it's possible to iterate yourself into a problem doesn't mean iteration failed you. Yes, ideally we would have seen this coming earlier. But consider that fast and small iteration has likely saved many hours of upfront work on features that have been quickly validated, and have since been changed or removed. 
It's also possible to [DRY up](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) code too soon into bad abstractions, which can be painful to tear apart.\n\nBut we reached a point where everyone agreed that the most efficient way forward required consolidating existing code.\n\n### Do the design work\n\n[Fabian](https://gitlab.com/fzimmer), our esteemed product manager, [proposed an epic](https://gitlab.com/groups/gitlab-org/-/epics/2161):\n\n> to build a new geo replication and verification framework with the explicit goal of enabling teams across GitLab to add new data types in a way that supports geo replication out of the box\n\nMost of the logic listed above in [Is it really that hard to copy data?](#is-it-really-that-hard-to-copy-data) is exactly the same for all data types. An internal framework could be used to significantly reduce duplication, which could deliver huge benefits:\n\n* Bugs in the framework only have to be fixed once, increasing reliability and maintainability.\n* New features could be added to the framework for all data types at once, increasing velocity and consistency.\n* Implementation details would be better hidden. Changes outside the framework become safer and easier.\n\nThe proposal went further than making it easy for *ourselves* to add Geo support to new data types. The goal was to make it easy for *non-Geo engineers* to do so. To achieve this goal, the framework must be easy to use, easy to understand, and well-documented. Besides the usual benefits of reducing duplication, this higher standard would help:\n\n* Minimize the effort to implement Geo support of new features, whether it's done by a Geo engineer or not.\n* Minimize lag time to add Geo support. If it's easy to do, and anyone can do it, then it's easy to prioritize.\n* Increase awareness in other teams that new features may require Geo support.\n* Influence the planning of new features. There are ways to make it more difficult to add Geo support. 
This is much easier to avoid during initial planning.\n\nAs a first step, Fabian [proposed creating a proof of concept of a framework](https://gitlab.com/gitlab-org/gitlab/-/issues/35540) leveraging lessons learned and incorporating improvements we already wanted to make to the existing architecture. The issue stimulated lots of design discussion in the team, as well as multiple POCs riffing off one another.\n\nThe biggest change was the introduction of a `Replicator` class which could be subclassed for every data type. The subclasses would contain the vast majority of the specifics to each data type.\n\nIn order to further reduce duplication, we also introduced the concept of a `Replicator strategy`. Most data types in GitLab could be categorized as blobs (simple files) or Git repositories. Within these categories, there was relatively little logic that needed to be specific to each data type. So we could encapsulate the logic specific to these categories in strategies.\n\nAnother significant decision was to make the event system more flexible and lightweight. We wanted to be able to quickly implement new kinds of events for a `Replicator`. We decided to do this without rewriting the entire event processing layer, by packaging and transmitting `Replicator` events within a single, generic event leveraging the existing heavyweight event system. We could then leave the old system behind, and after migrating all data types to the framework, we could easily replace it.\n\nOnce a vision is chosen, it can be difficult to see how to get there with small iterations. But there are often many ways to go about it.\n\n### Code\n\n#### High-level approach\n\nAt a high-level, we could have achieved our goal by taking two data types that were already supported, DRYing up their code, and refactoring toward the desired architecture. 
This is a proven, safe, and effective method.\n\nBut to me it felt more palatable overall to deliver customer value along the way, by adding support for a brand-new data type while developing the reusable framework. We already had practice implementing many data types, so there was little risk that we would, for example, take too long or use suboptimal abstractions. So we decided to do this with [Package registry](https://docs.gitlab.com/ee/user/packages/).\n\n#### Lay the foundation\n\nOur POCs already answered the biggest open questions about the shape of the architecture. The next step was to get enough of a skeleton merged, as quickly as possible, so that we could unlock further parallel work. To ensure correctness, we aimed to get something working end-to-end. We decided to implement \"replication of newly created Package files\". Much was left out, for example:\n\n* Replication of changes. (Most Blob types, including Package files, are immutable anyway)\n* Replication of deletes\n* Backfill of existing files\n* Verification was left out entirely from the scope of the first epic, since we already knew replication alone provides most of the value to users.\n\nSince the work still required many specific design decisions, we decided to [pair program](https://en.wikipedia.org/wiki/Pair_programming). [Gabriel Mazetto](https://gitlab.com/brodock) and I used [Zoom](https://zoom.us/) and [Visual Studio Live Share](https://visualstudio.microsoft.com/services/live-share/), which worked well for us, though there are many options available. [See a recording of our first call](https://www.youtube.com/watch?v=2XedCiU634s).\n\n[The spike](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23447) was merged and we thought ourselves safe under the feature flag. Looking back on this particular merge request, we did make a couple mistakes:\n\n1. An [autoloading bug was discovered](https://gitlab.com/gitlab-org/gitlab/-/issues/202044). 
The merge request was reverted, fixed, and remerged. Thanks to [CI](https://docs.gitlab.com/ee/ci/) and end-to-end QA tests using actual builds, the impact was limited.\n1. The size of the spike was unnecessarily large and difficult to review for a single merge request. As it grew, we should have used it as a \"reference\" merge request from which we could break out smaller merge requests. Since then, GitLab policies have further emphasized [smaller iterations](https://about.gitlab.com/handbook/product/product-principles/#iteration).\n\n#### Build on the foundation\n\nWith the skeleton of the framework in the main branch, we could implement multiple features without excessive conflicts or coordination. The feature flag was enabled on [GitLab's staging environment](https://about.gitlab.com/handbook/engineering/development/enablement/systems/geo/staging.html), and each additional slice of functionality was tested as it was merged. And new issues for bugs and missing features were opened.\n\nWe built up the [developer documentation](https://docs.gitlab.com/ee/development/geo/framework.html) as we went along. In particular, we documented specific instructions to implement a new data type, aimed at developers with no prior knowledge of Geo. These instructions have since been moved to issue templates. For example, [this is the template for adding support to a new Git repository type](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab/issue_templates/Geo%20Replicate%20a%20new%20Git%20repository%20type.md). This caught a lot of would-be pain points for users of the framework.\n\nFinally, we released [Geo supports replicating GitLab Package Registries in GitLab 13.2](https://about.gitlab.com/releases/2020/07/22/gitlab-13-2-released/#geo-supports-replicating-gitlab-package-registries)!\n\n## Reaping the benefits\n\nFollowing the release of Geo support for Package Registries, we added support for many new data types in quick succession. 
Automatic verification was added to the framework. This recently culminated in a non-Geo engineer implementing replication *and verification* for a new data type, within one month!\n\n* In GitLab 13.5, [Geo replicates external merge request diffs and Terraform state files](https://about.gitlab.com/releases/2020/10/22/gitlab-13-5-released/#geo-replicates-external-merge-request-diffs-and-terraform-state-files). These were added by Geo engineers who had been less involved in building the framework. Many refinements to the framework, and especially to the documentation, came out of this.\n* In GitLab 13.7, [Geo supports replicating Versioned Snippets](https://about.gitlab.com/releases/2020/12/22/gitlab-13-7-released/#geo-supports-replicating-versioned-snippets). This was also added by a Geo engineer, and it was the first Git repository type in the framework, so it required more work than adding new Blob types.\n* In GitLab 13.10:\n  * [Geo supports replicating Group wikis](https://about.gitlab.com/releases/2021/03/22/gitlab-13-10-released/#geo-supports-replicating-group-wikis) was implemented by a non-Geo engineer.\n  * [Geo verifies replicated package files](https://about.gitlab.com/releases/2021/03/22/gitlab-13-10-released/#geo-verifies-replicated-package-files). This was a big new feature in the framework, adding automatic verification to any data type that can be checksummed.\n* GitLab 13.11:\n  * [Geo supports Pipeline Artifacts](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#geo-supports-pipeline-artifacts) was implemented by a non-Geo engineer.\n  * [Geo verifies replicated Versioned Snippets](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#geo-verifies-replicated-versioned-snippets).\n* GitLab 13.12:\n  * [An already supported data type, LFS objects, is migrated to the framework under feature flag](https://gitlab.com/gitlab-org/gitlab/-/issues/276696). 
Following this will be the migration of \"Uploads\" and \"CI Job artifacts\", and then **deleting thousands of lines of code**. This should improve both reliability and velocity, for example, verification will be added to these data types.\n\nIn aggregate:\n\n* In GitLab 12.9, we replicated ~56% of all data types (13 out of 23 in total) and verified ~22%.\n* In GitLab 13.11, we replicate ~86% of all data types (25 out of 29 in total) and verify ~45%.\n* **In the last year, GitLab released six new features that needed Geo support. We replicate 100% of those new features and verify ~57%.**\n\n## What did it cost?\n\nFor comparison, it took around 3.5 months to [implement replication of Design repositories](https://gitlab.com/groups/gitlab-org/-/epics/1633). It took around 6 months to [implement the framework for replication of Package files](https://gitlab.com/groups/gitlab-org/-/epics/2346). So the cost to produce the framework for replication was roughly 2.5 months of work.\n\nWe don't really have a comparable for [implementation of verification](https://gitlab.com/groups/gitlab-org/-/epics/1817), but it looked like it would take about 3 months to implement for a single data type, while it took about 4 months total to implement for Package files and simultaneously add to the framework, for a cost of about 1 month.\n\nGiven that new data types now take about 1 month to implement replication *and verification*, the work to produce the framework **paid for itself with the implementation of a single data type**. All the rest of the benefits and time saved are more icing on the cake.\n\nMy only regret is that we should have done it sooner. 
I intend to be more cognizant of this kind of opportunity in the future.\n\n## What to expect in the future\n\n* [Already supported data types will be migrated into the framework](https://gitlab.com/groups/gitlab-org/-/epics/3588)\n* New features will be added more quickly, for example, verification will be rolled out for all [Blob](https://gitlab.com/groups/gitlab-org/-/epics/5285) and [Git repository](https://gitlab.com/groups/gitlab-org/-/epics/5286) data types\n* Duplication will be further reduced, for example, by [leveraging Rails generators](https://gitlab.com/gitlab-org/gitlab/-/issues/326842)\n\nHuge thanks to everyone who contributed to closing the gap on replicating *everything* in Geo!\n",[831,811,1698,677,9,832,723],{"slug":3854,"featured":6,"template":680},"how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo","content:en-us:blog:how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo.yml","How We Are Closing The Gap On Replicating Everything In Gitlab Geo","en-us/blog/how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo.yml","en-us/blog/how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo",{"_path":3860,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3861,"content":3867,"config":3873,"_id":3875,"_type":14,"title":3876,"_source":16,"_file":3877,"_stem":3878,"_extension":19},"/en-us/blog/how-we-boosted-webauthn-adoption-from-20-percent-to-93-percent-in-2-days",{"title":3862,"description":3863,"ogTitle":3862,"ogDescription":3863,"noIndex":6,"ogImage":3864,"ogUrl":3865,"ogSiteName":667,"ogType":668,"canonicalUrls":3865,"schema":3866},"How we boosted WebAuthn adoption from 20 percent to 93 percent in two days","With phishing campaigns on the rise across the industry, we accelerated rollout of a program to further enhance our security hygiene program. 
This is how we did it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682498/Blog/Hero%20Images/webauthn.jpg","https://about.gitlab.com/blog/how-we-boosted-webauthn-adoption-from-20-percent-to-93-percent-in-2-days","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we boosted WebAuthn adoption from 20 percent to 93 percent in two days\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Rubin\"}],\n        \"datePublished\": \"2022-11-09\",\n      }",{"title":3862,"description":3863,"authors":3868,"heroImage":3864,"date":3870,"body":3871,"category":720,"tags":3872},[3869],"Eric Rubin","2022-11-09","\nIn light of the high-profile phishing campaigns that breached public technology companies (e.g. [Twilio](https://techcrunch.com/2022/08/08/twilio-breach-customer-data/), [Uber](https://www.wired.com/story/uber-hack-mfa-phishing/), [Dropbox](https://www.securityweek.com/hackers-stole-source-code-personal-data-dropbox-following-phishing-attack), and others), GitLab decided to accelerate the implementation of the next phase of our security hygiene program, which would further enhance our security posture. As part of this acceleration, GitLab’s IT and Security teams recommended a swift adoption of phishing-resistant authentication across the entire company.\n\n## What did we decide to implement?\n\nWe already required multi-factor authentication (MFA) for all team members to log in to Okta, our primary launching point for the SaaS applications we use. The majority of our team members were primarily using the Okta Verify mobile app for push notifications, although they also had the options of using time-based one-time password ([TOTP](https://www.techtarget.com/searchsecurity/definition/time-based-one-time-password-TOTP)) codes, or [WebAuthn/FIDO2](https://webauthn.guide/) devices such as biometric (for example, Touch ID and Face ID) or security keys. 
\n\nWe decided to mandate the use of WebAuthn devices as the sole method for logging into Okta and remove other methods, and to get almost all team members enrolled within 48 hours from the date of launch.\n\n## Why is using WebAuthn important?\n\nOther two-factor authentication methods have known limitations. We already prohibited the use of SMS as a method for MFA as it is vulnerable to [SIM swap attacks](https://9to5mac.com/2021/10/01/protections-against-sim-swap/#:~:text=A%20port%2Dout%20attack%20is,new%20account%2C%20which%20they%20control); additionally, SMS provides a long duration for the texted code to be used by a phisher on the legitimate website. TOTP codes have a shorter duration, but still could allow for [relay attacks](https://intel471.com/blog/otp-password-bots-telegram). Push-based MFA such as the Okta Verify mobile app is vulnerable to [MFA fatigue attacks](https://www.uber.com/newsroom/security-update), where an attacker repeatedly bombards the user in the hope that they either get frustrated and approve a notification to make it stop, or otherwise accidentally approve one. \n\nWe decided that we needed to go back to fundamentals – strong MFA that is phishing-resistant. WebAuthn uses public cryptography, which verifies that the website you are logging into is the correct one. Additionally, the website only allows specifically enrolled devices to complete the authentication. The WebAuthn device effectively takes the human out of the loop – you can’t send the credentials to a phishing site. \n\n## How did we communicate the change to mandatory WebAuthn?\n\nThe communication to team members about the transition to WebAuthn started with a company wide Slack announcement from our CEO and co-founder [Sid Sijbrandij](https://gitlab.com/sytses). The message was delivered on a Tuesday evening Pacific Time, with an implementation completion date of Thursday evening Pacific Time. 
\n\nWe also:\n- Created a dedicated Slack channel for team member questions.\n- Circulated a Google Doc FAQ with more than 47 questions populated by team members and answered by the [DRI](/handbook/people-group/directly-responsible-individuals/) for the implementation or other team members. At GitLab everyone is encouraged to contribute.\n- Highlighted the change in our internal newsletter.\n- Added documentation, including easy-to-follow instructions, to our [handbook](/handbook/business-technology/okta/).\n\n## How did we implement the change to WebAuthn?\n\nHow could we roll out WebAuthn so quickly, with more than 1,700 team members working remotely across more than 65 countries? We had already started the ball rolling earlier this year. First, we pre-tested with a small group of IT, and then company-wide volunteers, providing instructions for team members to use. Uptake was low though, so we knew we had to be more assertive. \n\nGitLab is a majority Mac company, so we were able to take advantage of the built-in Touch ID capability already available on team members' laptops. It was also very helpful that users were familiar with the technology from using it on their smartphones.\n\nFor the ~5% of users who are on Linux, we instructed them to use their YubiKeys, and if they didn’t already have one, we facilitated delivery via Yubico’s [YubiEnterprise Delivery](https://www.yubico.com/products/yubienterprise-delivery/). We allowed any team member who wanted a YubiKey to get one via our deal, including Mac users who wanted to use Firefox ([Touch ID isn’t supported yet](https://bugzilla.mozilla.org/show_bug.cgi?id=1536482)), those who work with their laptop docked and didn’t want a new Touch ID external keyboard, or any other reason. 
In all, we had about 20% of our team members take up our offer to obtain YubiKeys.\n\nOur biggest win after the start of rollout was the discovery of how to add new WebAuthn devices to Okta (such as a new laptop or smartphone) via QR code scanning. This meant that as long as team members had a single enrolled device (either their laptop or their phone), they could [self-service](/handbook/business-technology/okta/#i-want-to-add-touch-id--face-id-to-okta-for-my-mobile-device-iphone-android-tablet) the WebAuthn enrollment of a new device, without requiring IT Helpdesk support. This helped us to speed the rollout and reinforced our security posture at a quicker pace, and meant that we didn’t have to send all team members YubiKeys that would only be used in the relatively rare event of needing to enroll a new device.\n\n## Initial results \n\nAfter the Slack announcement was posted, our IT Helpdesk team held virtual “office hours” on Zoom staffed for at least two hours per region. During the virtual office hours team members could drop in and get real-time help. After 24 hours from the launch of the initiative, we found that 80% of team members had already enrolled!\n\nTo push us further along, a Slack Bot was created and customized messages were sent directly to team members who had not yet enrolled and their managers. This additional step brought our enrollment efforts to the 93% mark of our team members.\n\nAt our deadline, we implemented carefully crafted new policies in Okta, locking down the vast majority of team members to using only WebAuthn. 
Small exception groups were created for those on PTO (because it would be frustrating for them and create unnecessary troubleshooting requests for the IT Helpdesk), as well as some users awaiting arrival of their shipped YubiKeys.\n\nThe new Okta policy and communication efforts were quite successful for us, and we have been pleased at the low volume of support requests, given the magnitude of the change and the timeframe given.\n\n## Going forward \n\nWe know that [threat vectors are always evolving](/blog/top-challenges-to-securing-the-software-supply-chain/) and we will continue to monitor them closely. We also will continue to assess our security posture and iterate to make improvements as needed.\n\nCover image by [FLY:D](https://unsplash.com/@flyd2069) on Unsplash.\n{: .note}\n",[720,1295,9],{"slug":3874,"featured":6,"template":680},"how-we-boosted-webauthn-adoption-from-20-percent-to-93-percent-in-2-days","content:en-us:blog:how-we-boosted-webauthn-adoption-from-20-percent-to-93-percent-in-2-days.yml","How We Boosted Webauthn Adoption From 20 Percent To 93 Percent In 2 Days","en-us/blog/how-we-boosted-webauthn-adoption-from-20-percent-to-93-percent-in-2-days.yml","en-us/blog/how-we-boosted-webauthn-adoption-from-20-percent-to-93-percent-in-2-days",{"_path":3880,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3881,"content":3887,"config":3892,"_id":3894,"_type":14,"title":3895,"_source":16,"_file":3896,"_stem":3897,"_extension":19},"/en-us/blog/how-we-built-gitlab-geo",{"title":3882,"description":3883,"ogTitle":3882,"ogDescription":3883,"noIndex":6,"ogImage":3884,"ogUrl":3885,"ogSiteName":667,"ogType":668,"canonicalUrls":3885,"schema":3886},"How we built GitLab Geo","Take a deep dive into the many architectural decisions we made while building GitLab Geo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678985/Blog/Hero%20Images/how-we-built-geo-cover.jpg","https://about.gitlab.com/blog/how-we-built-gitlab-geo","\n                        
{\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we built GitLab Geo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Mazetto\"}],\n        \"datePublished\": \"2018-09-14\",\n      }",{"title":3882,"description":3883,"authors":3888,"heroImage":3884,"date":3016,"body":3890,"category":743,"tags":3891},[3889],"Gabriel Mazetto","\n[Geo](/solutions/geo/), our solution for read-only mirrors of your GitLab instance, started with our co-founder [Dmitriy Zaporozhets](/company/team/#dzaporozhets)’ crazy idea of making not only the repositories, but the entire GitLab instance accessible from multiple geographical locations.\n\nAt that time (Q4 of 2015) there were only a few competitors trying to provide an *automatic mirroring* solution for repositories and/or issue trackers, and they were mostly built around an additional independent instance and a bunch of webhooks to replicate events. Also, in those cases, no other data was shared outside this asynchronous replication channel, and you had to set up the webhook per project and take care of the users yourself. Long story short: this was not practical for any instance with more than a couple of projects.\n\nWe also had a previous experience early that year [using DRBD to migrate 9 TB of data](/blog/moving-all-your-data/) from our dedicated co-location hosting to the AWS cloud,\nwhich didn't provide the scale, performance, or the UX we had in mind for the future.\n\nHere's the history of how we built Geo:\n\n## Phase 1: MVP\n\nGeo's first mission was to provide people who were located in satellite offices, or in distant locations, with fast access to the tools they need to get work done. The plan was not only to make it faster for Git clones to occur in remote offices but also to provide a fully functional read-only version of GitLab: all project issues, Git repositories, Wikis, etc. 
automatically synchronized from the primary with as little delay as possible.\n\nTo get there we made a few architectural decisions:\n\n#### 1. Use native database replication\n\nThis would allow us to replicate any user-visible information, user content, user and permissions, projects, any project relation to groups/namespaces, etc. Basically, any data ever written to the database in the primary node made readily available to the others, without any extra communication overhead in the webhooks.\n\nIt is also the most [Boring Solution](https://handbook.gitlab.com/handbook/values/#efficiency), as it uses proven technologies developed for databases in the past two decades. To simplify the endeavor we decided to support only PostgreSQL.\n\n#### 2. Use API calls to notify any secondary node of changes that should happen on disk\n\nThis is the second synchronization mechanism. If a new project is created or a repository updated, this notification lets any other node know they have this pending action, and should replicate the new data on disk.\n\n#### 3. Use Git itself to replicate the repositories\n\nWe investigated many alternatives to replicate our repositories, from using basic UNIX tools (like `rsync` or equivalent) to specific distributed file-systems features. We were aiming for a simple solution, as ideally we had to support the lowest common denominator, which is a Linux machine running the default filesystem (ext3 or 4). That limitation ruled out any distributed file-system based implementation.\n\nWe considered `rsync` and its variants as well, which could potentially work for our use case, but that would add significant CPU for each synchronization operation, and we expect it to increase as the repositories get bigger and bigger.\n\nBy using `rsync` we would need to grant more on-disk permissions than we were comfortable doing, and restricting its reach could be an engineering challenge in itself.\n\nThe same can be said for `scp` and its variants. 
In the end, we decided to use Git itself and benefit from its internal protocol. This was a no-brainer and very easy decision to make. We understood the protocol enough and we already had the required safeguards in place. All we needed was a slightly different authentication mechanism for the node-to-node synchronization.\n\n#### 4. Always push code to the primary, pull code from anywhere\n\nWhen we started Geo, there was no bundled Git support for having a multi-repository \"transactional\" replication, or information on how to implement one.\n\nWe figured out quickly that to implement something on that line it would require either a *global lock* or to implement a variant of [RAFT](https://raft.github.io/)/[PAXOS](https://en.wikipedia.org/wiki/Paxos_(computer_science)) on top of Git internal protocol.\n\nBoth solutions have their downsides and tradeoffs, and adding to that the time and effort to build it correctly, led us to opt for the simplest implementation: always push to the primary, notify secondaries that repository data changed, and have the secondaries fetch the changes. This is also in line with our motto of [Boring Solutions](https://handbook.gitlab.com/handbook/values/#efficiency).\n\nThe initial repository synchronization is no different than doing a `git clone \u003Cremote> --mirror`. The same idea goes for the repository updates, they behave very similarly to a `git fetch \u003Cremote> --prune`. The difference is that we need to replicate additional, internal metadata as well, that is not normally exposed to a regular user.\n\n![GitLab Geo - MVP Synchronization Architecture Diagram](https://about.gitlab.com/images/blogimages/how-we-built-geo/geo-architecture-mvp.png){: .medium.center}\n\n#### 5. Don’t replicate Redis data between nodes\n\nWe initially thought we could replicate Redis as well as the main database in order to share cached data, session information, etc. 
This would allow us to implement a Single Sign-On solution very easily, and by reusing the cache we would speed up the initial page load.\n\nAt that time Redis only supported **Leader** to **Follower** replication mode and even though it is usually super fast when used in a local network, the fact remains that replicating data across disparate geographical locations can add significant latency.\n\nThis additional latency would impact on the initial objective of simplifying the Single Sign-On implementation. If you simply log in on the primary node and get redirected to the secondary, chances are that the session information would still not be available on the secondary node due to the replication latency.\n\nThat would eventually fix itself by redirecting back and forth, but if the latency is significant enough, your browser will terminate the connection based on the redirect loop prevention feature. Another downside of this approach is that it creates a hard dependency on the primary node being online, or otherwise the secondary node would be inaccessible and/or completely broken.\n\nIn addition to all these issues, we needed an additional Redis instance that supports writing data to it, in order to persist Jobs to our Jobs system on the secondary node.\n\nSo it made sense, in the end, to give up on the idea of replicating Redis, and we started looking for a solution to the authentication problem.\n\n#### 6. Authenticate on the primary node only\n\nBecause we can’t write on the main database of secondary nodes, any auditing logs, brute force protection mechanism, password recovery tokens, etc. can’t have their data and state persisted inside secondary nodes. The only viable solution then is to authenticate on the primary and redirect the user to the secondary.\n\nThis decision also helped with the integration of any company-specific authentication systems. 
If a company uses internal authentication based on LDAP, CAS or SAML for instance, then they wouldn't have to replicate that system to the other location or configure firewall rule exceptions to accept traffic over the internet.\n\n#### 7. Implement Single Sign-On and Single Sign-Off using OAuth\n\nWith the previous Redis limitations in mind, we looked into alternatives to implement the authentication. We had to choose between either CAS or an OAuth-based one. As we already had OAuth Provider support inside GitLab, we decided to go with that.\n\nBasically, for any Geo node configured in the database we also have a corresponding OAuth application inside GitLab, and whenever a new user tries to log into a Geo node, they get redirected to the primary node and need to \"allow\" the \"Geo application\" to have access to their account credentials at the first login.\n\nThe shortcoming here is that if you are not logged in already and the primary goes down, you can't log in again until the primary node connectivity issue is fixed.\n\n#### 8. Build a read-only layer on the application side to prevent accidents\n\nWe needed this safeguard in place in case any required subsystem was misconfigured. With the read-only layer, we can prevent the instance from diverging from the primary in a non-recoverable way. It's also this layer that prevents anyone from pushing a repository change to the secondary node directly.\n\n#### 9. Don’t replicate any user attachments yet, just redirect to the primary\n\nInstead of trying to replicate user attachments at this stage, we decided to just rewrite the URLs pointing the resource to the primary node instead. 
This allowed us to iterate faster and still provide a decent experience to the end users.\n\nThey would still enjoy faster access to the repository data and have the web UI rendering the content from a closer location, with the exception of the issue/merge request attachments, avatars etc, which were still being fetched from the primary. But as they are also highly cachable the impact is minimal.\n\nThis was the initial foundation that allowed us to validate Geo as a viable solution. Later on, we took care of replicating the missing data as well.\n\n### Bonus trivia\n\nThe term **Geo** came only after a while, it was previously named as **GitLab RE** (*Read-Only Edition*), followed by **GitLab RO** (*Read Only*) before getting its final name: **GitLab Geo**.\n\n## Phase 2: First-generation synchronization mechanism\n\nWith the MVP implementation done, we needed to pave the way for a stable release. The first part we decided to improve was the notification mechanism for pending changes. During the MVP, we built a custom API endpoint and a buffered queue. That queue was also optimized to store only unique, idempotent events. If a project received three push events in the last few seconds, we only needed to store and process one event notification.\n\nWe decided that instead of building our own custom notification \"protocol\" and implementing some early optimizations, we should leverage existing GitLab internal capabilities: our own webhooks system.\n\n![GitLab Geo - First Generation Synchronization Architecture Diagram](https://about.gitlab.com/images/blogimages/how-we-built-geo/geo-architecture-first-gen.png){: .medium.center}\n\nBy taking that route, we would be forced to \"[drink our own champagne](https://en.wikipedia.org/wiki/Eating_your_own_dog_food#Alternative_terms)\" and as a result, improve our existing functionality. That decision actually resulted in improvements to our system-wide webhooks in a few ways. 
We added new system-wide webhook events, expanded the granularity of the information available, and fixed some performance issues.\n\nWe've also improved the security of our webhooks implementation by adding ways of verifying that the notification came from a trusted source. Previously the only way to do that relied on whitelisting the originating IP address as a way to establish trust.\n\nThis security limitation was not present in the MVP version, as we reused the admin personal token as the authorization mechanism for the API, which is also not ideal, but better than previous webhook implementation.\n\nI consider this to be the first generation of the synchronization mechanism that was used in the wild. It had a few characteristics: it reacted almost like real-time for small updates, webhook was fast enough and parallelizable to be used on the scale we wanted to support.\n\nAs the very first version of Geo was only concerned with getting repositories available and in-sync, from one location to the other, that's where we focused all of our efforts. At that time, setting up a new Geo node required an almost identical clone of the primary to be available in advance. That included not only replicating the database but also *rsyncing* the repositories from one node to the other. For improved consistency, we required initially a *stop the world* phase in order to not lose changes made during the time between when the backup started and when the secondary node got completely set up.\n\nWhile this was still closer to a barebones solution, it already provided value for remote teams working together in a shared repository or simply in any project that needed to synchronize code between different locations. 
We had a few customers trying it out and evaluating the potential, but it was still not ready for production use as we were still missing a lot of functionality.\n\nThe *stop the world* phase previously mentioned got phased out later with the help of improved setup documentation. Much later, a good chunk of the initial cloning step got simplified by leveraging some improvements in the next-generation synchronization and by introducing a backfilling mechanism.\n\n### First-generation synchronization pitfalls\n\nWhile our first-generation solution worked fine for the highly active repositories, the use of webhooks as a notification mechanism had some really obvious drawbacks.\n\nIf, for any reason, the notification failed to be delivered, it had to be rescheduled and retried. Also because we were using our internal Jobs system to dispatch the webhooks, having a node go dark for a few hours meant our Jobs system would be busy retrying operations over an unreachable destination for at least that same amount of time.\n\nDepending on the volume of data and how long it has been accumulating changes, that could even fill up the Redis instance disk storage. If that ever happened we would have to resync the whole instance again and start from scratch.\n\nWe've improved the retrying mechanism with custom Geo logic to alleviate the problem, but it was clear to us that this was not going to be a viable solution for a Generally Available (*stable*) release.\n\nAlso because of backoff algorithm in the retrying logic, in conjunction with the asynchronicity aspect of the system, it could lead to important changes taking a lot of time to replicate, especially in less active projects. The busiest ones were less affected, as any update to the repository would get it to the current state rather than to the state when the update notification was issued. 
And because the project is receiving many updates during the day, it's expected to generate also many notification events.\n\nAny implementation misstep between sending the webhook or receiving and processing it on the other side could mean we would lose that information forever. This was again not a major issue with highly active projects, as it would eventually receive a new, valid update notification which would sync it to the current state, but the outliers could miss it until someone notices or another update arrives much later.\n\nWe also wanted to make Geo a viable Disaster Recovery solution in the long term, so missing updates without a way to recover from it was not an option.\n\n## Phase 3: Second-generation synchronization mechanism\n\nWe started looking for alternative ways of notifying the secondary nodes and also considered switching to other standalone queue systems instead. We were also worried about the lack of control over the order in which the operations would happen in a parallel and asynchronous replication system and on the effect it had on the data on disk.\n\nA few examples of situations that can happen because of the parallelism and the async nature of it:\n\n1. A project removal event can be processed before a project update for the same repository\n1. Renaming, creating a project with the new name and sending new content to it, if processed in an incorrect order, can lead to temporary data loss\n\nThere was also the case when the notification arrived before the database had replicated the required data. As an example, when the node receives the notification for new project creation, but the database doesn't have it yet.\n\nThat required the secondary node to keep a \"mailbox\" until the received events are ready to be processed. 
As they were basically Jobs, that meant keep retrying until the job succeeded.\n\nConsidering all the complexity we had brought to the application layer, we investigated a few standalone queue systems to which we could offload the burden, but decided ultimately to build an event queue mechanism in PostgreSQL instead, as it had three important advantages:\n\n#### 1. No extra dependencies\nWe were already replicating the database, so there is no need to install, configure and maintain another process, worry about backing up yet another component, integrate it in our Omnibus package, and provide support for our users.\n\n#### 2. No more delayed processing\nIf the event arrives on the other side, the data associated with it will already be there as well. We can also guarantee consistency with transactions and repeat less information than with the webhooks implementation.\n\n#### 3. Easy to retry/restore from backup or in a disaster situation\nWith a standalone queue system, to have a consistent backup solution you either need some sort of \u003Cabbr title=\"Write-Ahead Logging\">WAL\u003C/abbr> files that could help rebuild a consistent state between the systems or do backups in a \"stop the world\" way, otherwise, you may lose data.\n\n### Our implementation\n\nWe took inspiration from how other log-based replication systems work (like the database) and implemented it with a central table as the main source of truth and a few others to hold bookkeeping for specific event types. Any relevant information we used to ship with the webhook notification is now part of this implementation, with extras to support the missing replicable events.\n\nOn the secondary node, these new tables are read by a specific daemon (we call it the Geo Log Cursor), and as the name suggests, it holds a persistent pointer of the last processed event. This allows us to also report the state of replication and monitor if our replication is broken. 
We also made it highly available, so you can boot up one as **Active** and keep a few extras as **Standby**. If the Active daemon stops responding for a specified amount of time a new election starts and one of the Standbys takes place as the new Active.\n\nThe second part of the new system requires a persistent layer on the secondary node to keep any synchronization state and metadata. This was done by using another PostgreSQL instance.\n\nWe couldn’t reuse the same main instance, as we were replicating with *Streaming replication* mode. With *Streaming replication*, the whole instance is replicated, and you can’t perform any change in it. The alternative to being able to replicate and write in the same instance is to use *Logical replication* mode, but at that time, there was no official *Logical Replication* support available in the PostgreSQL versions we supported (PgLogical was also not a viable alternative back then).\n\nWith the new persistence layer (we call it the *Geo Tracking Database*), we had the foundations built to be able to actively compare the \"desired vs actual\" state, and find missing data on any secondary instance. We built a more robust backfilling mechanism based on that as well.\n\nQuerying between the two database instances (the replicated Secondary, and the Tracking Database), were made much faster and scalable by enabling Postgres FDW ([Foreign Data Wrapper](https://www.postgresql.org/docs/9.6/static/postgres-fdw.html)). That allowed us to query data using a few **LEFT JOIN** operations among the two instances, instead of pooling with multiple queries from the application layer against the two databases in isolation.\n\n![GitLab Geo - Second Generation Synchronization Architecture Diagram](https://about.gitlab.com/images/blogimages/how-we-built-geo/geo-architecture-second-gen.png){: .medium.center}\n\n### Other improvements\n\nAnother important shortcoming fixed was how we replicated the SSH Keys. 
This was technical debt we needed to pay since the first implementation. Historically, GitLab built the SSH authorization mechanism as with many other Git implementations, by writing each user-provided SSH Key to the `AuthorizedKeys` file on the server and pointing each one to our [gitlab-shell](https://gitlab.com/gitlab-org/gitlab-shell) application.\n\nThis implementation allowed us to authenticate the authorized users, and because we control how the Shell application is invoked, we can pass a specific key ID to it, that can be used later to identify the user on our database and authorize/deny operations to specific repositories.\n\nThe problem with this approach, in general, is that the bigger the user base is, the slower the initial request will be, as OpenSSH will have to perform a scan to the whole file (**O(N)** complexity). With Geo, that's not just about speed but any delay in updating this file either to add a new key or to revoke an existing one is very undesirable.\n\nWhen we decided to fix that we did for both Geo and GitLab Core by using an interesting feature present in newer versions of OpenSSH (6.9 and above), that allows overriding the `AuthorizedKeys` step, switching from reading the keys from a file to invoke a specified CLI instead (*O(1)* complexity). You can read more about it [in the documentation here](https://docs.gitlab.com/ee/administration/operations/fast_ssh_key_lookup.html#doc-nav).\n\nWe fixed another shortcoming around the repository synchronization, switching from Git over SSH protocol, to Git over HTTPS. The initial motivation was to simplify the setup steps, but that decision also allowed us to shape the synchronization differently when it was originated from a Geo node, vs a regular request. 
Internally we store additional metadata in the repository and also commits that may no longer exist in your regular branches, but were part of a previous merge request, or had user comments associated with them.\n\nBy also switching to full HTTP(S), it made it simpler to run our development instances locally with [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit), which helped to improve our own internal development process as well.\n\n## Phase 4: Third-generation synchronization and the path to a Disaster Recovery solution\n\nWhile still working in Phase 3, we discovered another major limitation around how we stored files on disk. GitLab, for historical reasons, stored repositories and file attachments in a similar disk structure as the base URL routes. For group and project `gitlab-org/gitlab-ce` there would be a path on disk that would include `gitlab-org/gitlab-ce` as part of it. The same is true for file attachments.\n\nKeeping both the database and disk in sync, even not considering Geo replication, means that at any time a project is renamed, several things have to be renamed on disk as well.\n\nThis is not only slow and error prone: what should we do if something fails to rename in the middle of the \"transaction?\" This is also problematic when replication comes into place as we are susceptible again to processing it in the correct order or risk a temporarily inconsistent state.\n\nWe tried to find a solution to problems around the order of execution of the events and we came up with three ideas:\n\n1. **Find or build a queue system that is guaranteed to process things in the same order they were scheduled**\n2. **Detect and recover from any replication failure or data corruption**\n3. 
**Make every replication operation idempotent, removing the queue-ordering requirement completely**\n\nThe first one was easily ruled out, as even if we switched to a queue system with that type of guarantee, it would be either slow due to the lack of parallelism in order to guarantee the order requirement, or will be extremely complex and hard to use as it would require extra care to have the same guarantees while also working in parallel.\n\nWe found no system that satisfied our needs, and even if we considered a standalone queue solution, we would lose the Postgres advantage from the previous generation, of having both the main database and the queue system always in sync.\n\nRuling out the first one, we considered the second idea of detecting and recovering from failures and data corruption as we concluded we needed it for *Disaster Recovery* anyway. Any robust *Disaster Recovery* solution needs to guarantee that the data it is holding is the exact one it's supposed to have. If, for any reason, that data gets corrupt or someone removes it from disk, it needs a way of detecting it and restores it to the desired state.\n\nTo achieve that, we built a robust verification mechanism that generates a checksum of the state of the repository and is stored in a separate table in the primary node. That table gets replicated to secondary nodes, where another checksum is also calculated (and stored in the Tracking Database). If both checksums match, we know the data is consistent. 
The checksum is recalculated automatically when an update event is processed, but can also be triggered manually.\n\n![Screen Capture - Repository Verification Status](https://about.gitlab.com/images/blogimages/how-we-built-geo/verification-status-primary.png){: .medium.center}\n\nWe used that mechanism to validate all repositories in `gitlab.com` when successfully [migrating from Azure to GCP](/blog/gcp-move-update/), last month.\n\nThe verification mechanism is not enough and while it gives us the guarantees we need, we can do better, which is why we also decided to implement the third idea as well, and make every replication operation idempotent in order to remove any situation where processing the incorrect order of events would put data in a temporarily inconsistent state.\n\nWe are calling that solution the [Hashed Storage](https://docs.gitlab.com/ee/administration/repository_storage_types.html). This is a complete rewrite of how GitLab stores files on disk. Instead of reusing the same paths as present in the URLs, we use the internal IDs to create a hash instead and derive the disk path from that hash. With the Hashed Storage, renaming a project or moving it to a new group requires only the database operations to be persisted, as the location on disk never changes.\n\n![Hashed Storage and Legacy Storage example](https://about.gitlab.com/images/blogimages/how-we-built-geo/hashed-storage-disk-path-example.png){: .medium.center}\n\nBy making the paths on disk immutable and non-conflicting, any `create`, `move` or `remove` operations can happen in any order, and they will never put the system in an inconsistent state. Also replicating a project rename or moving a project from one group/owner to another will require only the database change to be propagated to take full effect on a secondary node.\n\n## What to expect from Geo in the near future\n\nImplementing Geo has been an important effort at GitLab that involved many different areas. 
It is a crucial infrastructure feature that allowed us to migrate from one cloud provider to another. We also believe it's an important component to support the needs of many organizations today, from providing peace of mind regarding data safety in the events of a Disaster Recovery, to easing the burdens of distributed teams across the globe.\n\nWe've been using the feature ourselves and this allowed us to stress-test the biggest and most challenging GitLab installation, GitLab.com, making sure it will work just as fine for any other customer.\n\nOver the upcoming months we will be focusing on the following items:\n\n* Release a push proxy for Geo secondary nodes: [Pull and push from the same remote transparently](https://gitlab.com/groups/gitlab-org/-/epics/124)\n* Release [Hashed Storage as *Generally Available*](https://gitlab.com/groups/gitlab-org/-/epics/75)\n* Improve configuration: We want to reduce the steps and make it [simpler via automating most steps](https://gitlab.com/groups/gitlab-org/-/epics/367)\n* Improve the verification step: [Improve the signals we use for the checksum](https://gitlab.com/gitlab-org/gitlab-ee/issues/5196)\n* [Improve the Geo UX and UI](https://gitlab.com/groups/gitlab-org/-/epics/369)\n* Keep improving performance and reliability\n* Support replication of [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-ee/issues/4611) and the internal [Docker Registry](https://gitlab.com/gitlab-org/gitlab-ee/issues/2870)\n\nCover photo by [NASA](https://unsplash.com/photos/Q1p7bh3SHj8) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[677,9],{"slug":3893,"featured":6,"template":680},"how-we-built-gitlab-geo","content:en-us:blog:how-we-built-gitlab-geo.yml","How We Built Gitlab 
Geo","en-us/blog/how-we-built-gitlab-geo.yml","en-us/blog/how-we-built-gitlab-geo",{"_path":3899,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3900,"content":3906,"config":3912,"_id":3914,"_type":14,"title":3915,"_source":16,"_file":3916,"_stem":3917,"_extension":19},"/en-us/blog/how-we-designed-the-gitlab-reference-architectures",{"title":3901,"description":3902,"ogTitle":3901,"ogDescription":3902,"noIndex":6,"ogImage":3903,"ogUrl":3904,"ogSiteName":667,"ogType":668,"canonicalUrls":3904,"schema":3905},"How we designed the GitLab Reference Architectures","Take a look back with us as we dive into our Reference Architectures design journey to help users easily deploy GitLab at scale. Learn our goals, process, and what's happened in the five years since.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098651/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%282%29_52vS9ne2Hu3TElOeHep0AF_1750098651525.png","https://about.gitlab.com/blog/how-we-designed-the-gitlab-reference-architectures","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we designed the GitLab Reference Architectures\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grant Young\"}],\n        \"datePublished\": \"2024-10-02\",\n      }",{"title":3901,"description":3902,"authors":3907,"heroImage":3903,"date":3909,"body":3910,"category":1291,"tags":3911},[3908],"Grant Young","2024-10-02","We introduced the first [GitLab Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures) five years ago. 
Originally developed as a partnership between the GitLab Test Platform (formally Quality Engineering) and Support teams, along with other contributors, these architectures aim to provide scalable and elastic starting points to deploy GitLab at scale, tailored to an organization's target load.\n\nSince their debut, we've been thrilled to see the impact these architectures have had on our customers as they navigate their DevSecOps journey. We continue to iterate, expand, and refine the architectures, reflecting our commitment to providing you with the latest, best-in-class guidance on deploying, scaling, and maintaining your GitLab environments.\n\nIn recognition of the five-year milestone, here is a peek behind the curtain on _how_ we designed the Reference Architectures and how that design still applies today.\n\n## The problem\n\nBefore introducing the Reference Architectures, we frequently heard from our customers about the hurdles they faced when deploying GitLab at scale to meet their performance and availability goals.\n\nWhile every GitLab environment can be considered a little unique because of the need to meet a customer's own requirements, we recognized from running GitLab.com, as well as from our larger customers, that there were common fundamentals to deploying GitLab at scale that were worth sharing. Our objective was to address customer needs while promoting deployment best practices to reduce drift and increase alignment.\n\nSimultaneously, we wanted to significantly expand our performance testing efforts. The goals of this expansion were to provide our engineering teams with a deeper understanding of performance bottlenecks, to drive improvements in GitLab's performance, and to continuously test the application moving forward to ensure it remained performant. 
However, to conduct meaningful performance tests, we needed a standardized GitLab environment design capable of handling the target loads.\n\nEnter the Reference Architectures.\n\n## The goals\n\nWith the need for a common architecture clear, we turned next to set the goals of this initiative, which ultimately became the following:\n\n- Performance: Ensure the architecture can handle the target load efficiently.\n- Availability: Maximize uptime and reliability wherever possible.\n- Scalability and elasticity: Ensure the architecture is scalable and elastic to meet individual customer needs.\n- Cost-effectiveness: Optimize resource allocation to avoid unnecessary expenses.\n- Maintainability: Make the architecture deployment and management as straightforward as possible with standardized configurations.\n\nIt's crucial to note that these goals were not in order and they are goals we stay true to today.\n\n## The process\n\nOnce the goals were set, we faced the challenge of designing an architecture, validating it, and making sure that it was fit for purpose and met those goals.\n\nThe process itself was relatively simple in design:\n\n- Gather metrics on existing environments and the loads they were able to handle.\n- Define a prototype architecture based on these metrics.\n- Build and test the environment to validate.\n- Adjust the environment iteratively based on the test results and metrics until we had a validated architecture that met the goals.\n\nWhile simple in design, this, of course, was not the case in practice so we got to work.\n\nFirst, we collected and reviewed the data. To that end, we reviewed metrics and logging data from GitLab.com as well as several participating large customers to correlate the environment sizes deployed to the load they were handling. To achieve this, we needed an objective and quantifiable way to measure that load across any environment, and for that we used **Requests per Seconds (RPS)**. 
With RPS we could see the concurrent load each environment handled and correlate this to the user count accordingly. Specifically, a user count would correlate to the full manual and automated load (such as continuous integration). From that data, we were able to correlate this across several environment sizes and start to pick out common patterns for the architectures.\n\nNext, we started with a prototype architecture that aimed to meet the goals while cross-referencing with the data we collected. In fact, we actually started this step in conjunction with the first step initially as we had a good enough idea of where to start: Taking the fundamental GitLab.com design and scaling it down for individual customer loads in cost-effective ways. This allowed us to start performance testing the prototype with the data we were analyzing to corroborate accordingly. After quite a few iterations, we had a starting point for our prototype architecture.\n\nTo thoroughly test and validate the architecture we needed to turn to performance testing and define our methodology. The approach was to target our most common endpoints with a representative test data set at RPS loads that were also representative. Then, although we had manually built the prototype architecture, we knew we needed tooling to automatically build environments and handle tasks such as updates. These efforts resulted in the [GitLab Performance Tool](https://about.gitlab.com/blog/how-were-building-up-performance-testing-of-gitlab/) and [GitLab Environment Toolkit](https://about.gitlab.com/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale/), which I blogged about previously and which we continue to use to this day (and you can use too!).\n\nWith all the above in place we started the main work of validating the prototype architecture through multiple cycles of testing and iterating. 
In each cycle, we would performance test the environment, review the results and metrics, and adjust the environment accordingly. Through iteration we were able to identify what failures were real application performance issues and what were environmental, and eventually we had our first architecture. That architecture is now known as the [200 RPS or 10,000-user Reference Architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html).\n\n![GitLab Reference Architecture - 200 RPS](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098658/Blog/Content%20Images/Blog/Content%20Images/reference_architecture_aHR0cHM6_1750098658326.png)\n\n## Where Reference Architectures are today\n\nSince publishing our first validated Reference Architecture, the work has never stopped! We like to describe the architectures as living documentation, as they're constantly being improved and expanded with additions such as:\n\n- various Reference Architecture sizes based on common deployments\n- non-highly available sizes for smaller environments\n- full step-by-step documentation in collaboration with our colleagues in Technical Writing and Support\n- expanded guidance and new naming scheme to help with right sizing, scaling, and how to deal with outliers such as monorepos\n- cloud native hybrid variants where select components are run in Kubernetes\n- recommendations and guidance for cloud provider services\n- and more! 
Check out the [update history](https://docs.gitlab.com/ee/administration/reference_architectures/#update-history) section in the Reference Architecture documentation!\n\nAll this is driven by our [comprehensive testing program](https://docs.gitlab.com/ee/administration/reference_architectures/#validation-and-test-results) that we built alongside the Reference Architectures to continuously test that they remain fit for purpose against the latest GitLab code _every single week_ and to catch any unexpected performance issues early.\n\nAnd we're thrilled to see these efforts have helped numerous customers to date as well as our own engineering teams deliver new, exciting services. In fact, our engineering teams used the Reference Architectures to develop [GitLab Dedicated](https://about.gitlab.com/dedicated/). Five years on, our commitment is stronger than ever. The work very much continues in the same way it started to ensure you have the best-in-class guidance for your DevSecOps journey.\n\n> Learn more about [GitLab Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures/).\n",[1090,1298,1295,9,2437],{"slug":3913,"featured":91,"template":680},"how-we-designed-the-gitlab-reference-architectures","content:en-us:blog:how-we-designed-the-gitlab-reference-architectures.yml","How We Designed The Gitlab Reference Architectures","en-us/blog/how-we-designed-the-gitlab-reference-architectures.yml","en-us/blog/how-we-designed-the-gitlab-reference-architectures",{"_path":3919,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3920,"content":3926,"config":3931,"_id":3933,"_type":14,"title":3934,"_source":16,"_file":3935,"_stem":3936,"_extension":19},"/en-us/blog/how-we-keep-investors-in-the-loop",{"title":3921,"description":3922,"ogTitle":3921,"ogDescription":3922,"noIndex":6,"ogImage":3923,"ogUrl":3924,"ogSiteName":667,"ogType":668,"canonicalUrls":3924,"schema":3925},"How we keep investors in the loop","Monthly updates to investors and team 
members ensure transparency and open communication.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678944/Blog/Hero%20Images/investorupdate.jpg","https://about.gitlab.com/blog/how-we-keep-investors-in-the-loop","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we keep investors in the loop\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-10-17\",\n      }",{"title":3921,"description":3922,"authors":3927,"heroImage":3923,"date":3928,"body":3929,"category":808,"tags":3930},[930],"2018-10-17","\nI was a bright-eyed and bushy-tailed new GitLab team-member of two months when I emailed\n[Sid](/company/team/#sytses), the CEO of GitLab, and told him that I thought the investor\nupdate format needed a makeover. During my onboarding, I had heard that\n[**everyone can contribute**](/company/strategy/#why), so I decided to take the idea\nfor a test drive.\n\n![My message to Sid.](https://about.gitlab.com/images/blogimages/suriemail.png){: .shadow}\n\nI obsessively refreshed my inbox, waiting to see whether the sentiment was highly\nregarded only in theory, when Sid’s reply arrived.\n\n![Sid’s response.](https://about.gitlab.com/images/blogimages/sidemail.png){: .shadow}\n\n_Challenge accepted, Sijbrandij._ 😎\n\n## Updating the template\n\nOur investor update has gone through several iterations over the years. In the\nearly days, we included sections on hiring, feedback, and upcoming features.\nAfter reading blog posts by\n[Elad Gil](http://blog.eladgil.com/2015/05/investor-update-emails.html) and\n[Aaron K. 
Harris](http://www.aaronkharris.com/investor-updates), Sid developed\nour current version with their insights in mind and narrowed the scope of our\nupdate to the following sections: thanks, asks, key metrics, lowlights,\nhighlights, and next month expectations.\n\nWhen I joined GitLab, the investor update looked cluttered, and from a reader’s\nperspective, I had difficulty absorbing the information. Below is an example of\nan old update with the former template. Please note that some names have been\nchanged to respect organizations’ privacy.\n\n![Former investor update template.](https://about.gitlab.com/images/blogimages/oldtemplatev3.png){: .shadow}\n\nKnowing that investors can only dedicate a few minutes to each email, I knew that\nI had to organize the sections and copy in a way that would increase comprehension\nand reading speed, so I employed UX copy techniques.\n\n![New investor update template.](https://about.gitlab.com/images/blogimages/originaltemplate.png){: .shadow}\n\nWith this new format, investors can quickly read the update and locate the\ninformation that is most relevant to them.\n\n### Why these categories are important\n\nEach of the seven sections provides investors with a look inside GitLab, offering\na comprehensive assessment of our monthly performance.\n\n1. **CEO foreword**: A brief introduction that will typically coincide with the close of a fiscal quarter. This narrative will provide a high level overview of company operations from the most recently ended quarter as well as key initiatives and expectations for upcoming quarters. \n1. **Thanks**: We express gratitude for investors who have assisted us with\nmaking introductions, providing feedback, or offering assistance. Investing is a\ntype of social engagement, and we like to celebrate people who set aside time to\nhelp us.\n1. 
**Asks**: We ask our investors to help us connect with people or\norganizations, introduce us to hiring candidates, or provide some other assistance.\nInvestors can be extremely helpful and often say they want to add\nvalue when they invest, so this area of the update gives them the opportunity to\ndrive our business forward.\n1. **Key metrics**: People want to know how their investment is performing.\nOffering figures instills trust and shows a certain discipline and rigor. We\nwant our investors to know how we’re doing - even when we don’t meet our goals -\nbecause we believe in [transparency](https://handbook.gitlab.com/handbook/values/#transparency).\n1. **Lowlights**: Our commitment to open communication extends to this\nsection in which we always list the top three worst things that occurred in the\nmonth. By committing to three items, the question is no longer, “_Should_ I tell\nmy investors?” It’s “_Which_ three things are the most severe?” That's a\nmuch easier question to answer.\n1. **Highlights**: This section gets people excited about the investment and\nillustrates what we’re doing well.\n1. **Expectations**: We discuss what we’re looking forward to, conferences we’re\nattending, and what we’re planning in the next month.\n\n### Have a fixed number of good and bad things\n\nEvery month we send three lowlights and three highlights.\nThis forces us to always tell the three things that are worst.\nWe never have to wonder if something is important enough to include it.\nBy the severity of the items people can tell if it was a good or bad month.\n\n## Every company should send monthly updates\n\nIf you’re not sending investor updates, you’re keeping your biggest proponents\nin the dark. If people invested in your organization, you should keep them up to\ndate on what's happening with their investment. 
If investors don’t receive regular\ncommunication, they’re forced to go fishing for information and what they might\nhear could be inaccurate.\n\nMonthly updates instill confidence and save you from having to\nfield questions from several directions. When organizations don’t communicate,\ninvestors constantly have to ping their companies to ask how things are going.\nBut, if they receive regular updates, they know they're going to hear from you\nand learn the most challenging things that happened in the previous month. You\ndon’t want to give your investors any reason to worry.\n\nMonthly updates also help you build stronger bonds with investors. Because the\nbasics of an investment are covered each month, conversations with investors\ncan focus on deeper subjects. You can brainstorm about strategy, long-term\ngoals, and emerging trends rather than recap hiring challenges and share\nrelease updates.\n\n## Investors \u003C3 information\n\nInvestors have told us how much they love our updates, specifically expressing\ntheir appreciation of the reliability of our emails. We send the updates around\nthe 10th (give or take 1-3 days) of every month, so investors have come to\nexpect a little GitLab sunshine in their inbox.\n\nInvestors love the format(!) and [Y Combinator](http://www.ycombinator.com/)\nreached out to Sid asking whether the format could be shared with other YC\nfounders in a resource of high quality updates. As Sid says, “The format seems\nto be better than average.”\n\n>“I want to thank you and also commend you for having such\nconsistent, regular, excellent shareholder communications. It’s rare\nto see, and I think it elevates your company and it’s something that\nonly grows in importance as the company scales.” — GitLab investor\n\n## Now it’s your turn\n\nIf you’d like to send your investors a monthly update, we invite you to\n[create a copy](https://docs.google.com/document/d/1TVpESZlemYWLrXQHeDvnASCFvs_tvjpJnLahEFMIxYE/copy)\nof our template. 
As you work on your update, please remember that it’s important\nto establish a regular cadence and keep the emails concise. Links to spreadsheets\nwith detailed figures and an offer to answer any questions prevents people from\nbecoming overwhelmed.\n\nOur updates are also sent to team members, because we all have stock options and\nSid believes that it’s the company’s duty to inform us of our investment.\nMoreover, team members should know the highlights, lowlights, and next month's\nexpectations, since we’re all working towards a [common goal](/company/strategy/#sequence).\nWe encourage you to send the updates to your team since they invest their talents,\nideas, and efforts into making your organization successful.\n\nUPDATE: To see what we currently do, see our [Investor Relations page on our Monthly Investor Updates](/handbook/finance/investor-relations/#monthly-investor-update-email).\n",[873,9],{"slug":3932,"featured":6,"template":680},"how-we-keep-investors-in-the-loop","content:en-us:blog:how-we-keep-investors-in-the-loop.yml","How We Keep Investors In The Loop","en-us/blog/how-we-keep-investors-in-the-loop.yml","en-us/blog/how-we-keep-investors-in-the-loop",{"_path":3938,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3939,"content":3945,"config":3951,"_id":3953,"_type":14,"title":3954,"_source":16,"_file":3955,"_stem":3956,"_extension":19},"/en-us/blog/how-we-made-gitlab-more-secure-in-twenty-twenty",{"title":3940,"description":3941,"ogTitle":3940,"ogDescription":3941,"noIndex":6,"ogImage":3942,"ogUrl":3943,"ogSiteName":667,"ogType":668,"canonicalUrls":3943,"schema":3944},"How we made GitLab more secure in 2020","From preventing vulnerabilities to squashing bugs in source code; here’s how our security team has made GitLab more secure in 2020, and where they’ll focus efforts in 
2021.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670800/Blog/Hero%20Images/sec-2020-review.png","https://about.gitlab.com/blog/how-we-made-gitlab-more-secure-in-twenty-twenty","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we made GitLab more secure in 2020\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Johnathan Hunt\"}],\n        \"datePublished\": \"2020-12-16\",\n      }",{"title":3940,"description":3941,"authors":3946,"heroImage":3942,"date":3948,"body":3949,"category":698,"tags":3950},[3947],"Johnathan Hunt","2020-12-16","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n2020 was a highly-productive year, and one with high impact, which brought a number of security enhancements across GitLab’s product and environment.\n\nOur primary goal of strengthening GitLab’s enterprise grade security was accomplished through the implementation of numerous security controls and led to the successful completion of our first SOC 2 Type 2 attestation. We completed a 2 month field security study which consumed and aggregated data from current and prospective customers, the broader community, industry and several internal stakeholders (sales, support and product) to generate a report with prioritized areas of focus for our SaaS service.  Our teams have started strategic work aligned to these priorities and designed to further enhance security in our enterprise service, strengthen our competitive position and bolster the trust and confidence of our customers.\n\nWe also saw advancements in our goal of reducing the threat landscape.  
Vulnerability management was dramatically improved across all aspects of security including [application security](/topics/devsecops/) (reduced: time to mitigate, total overall vulnerabilities, and number of high severity vulnerabilities), infrastructure security (improved scanning capabilities and accuracy of detection as well as reduced time to patching and mitigation) and bug bounty (increased engagement, improved response and remediation). We implemented an industry leading governance, risk and compliance tool which improved the effectiveness and efficiency of risk management and third-party vendor reviews.  As a result, we saw a substantial improvement in customer adoption and third party security scoring.\n\nAs we look ahead into 2021, we will continue to focus on strengthening the security of GitLab Core and SaaS through a number of new and improved security features and services.  Further, we will ambitiously pursue a host of compliance certifications to independently validate implemented security controls designed to protect company and customer data.  Lastly, we continue to strive for and assert ourselves as the [most transparent security organization](/handbook/security/) in the world.  We are committed to finding creative and innovative ways of sharing our approach to security openly in our publicly available [handbook](/handbook/security/) and [blogs](/blog/tags.html#security).\n\n## Stronger intel for increased visibility, detection and response\n\n### Next gen SIEM\nIn October, our [Security Incident Response team (SIRT)](/handbook/security/#sirt---security-incident-response-team-former-security-operations) onboarded a next generation SIEM from [Panther Labs](https://runpanther.io/) to increase visibility into our environments, improve processes around our log volumes, and build modern detection and response processes. 
This increased visibility into the infrastructure for GitLab.com and the GitLab organization allows SIRT to more effectively reduce risk for customers and users and increases confidence in our platform. By leveraging modern tooling, we are able to manage the large volumes of logs and event data that are produced each day, scale our processes, and highlight potentially serious issues before they impact the community. In 2021, we’ll dive into this tooling and further build upon our detection and response processes and capabilities.\n\n### Publicly-available deep dives into technical challenges\nDuring our day-to-day work, the GitLab [Red Team](/handbook/security/threat-management/red-team/) often stumbles upon technical challenges that we need to overcome. We felt that it was important to capture these challenges and the solutions we discover and document them to help others who may be doing similar work and encountering the same problems. We created a public project called [Red Team Tech Notes](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/red-team-tech-notes) and began documenting things there. This project contains everything from our public technical presentations to research papers and discovered vulnerabilities. By sharing this information publicly, others can learn and benefit from our work and experiences. In addition, we encourage the community to provide us with feedback on our research that may help us learn new things, improve our operations and increase the value and quality of our content. In 2021, we'll be focusing on purple-teaming, business relevant table-top exercises and improving existing tooling to aid our SIRT team operations.  
We're also going to be holding an [Ask Me Anything/AMA session on Jan 26](https://docs.google.com/forms/d/e/1FAIpQLSekc1LYWYbhORNzZvLza8Btn9V0wY7K9SGVZed5RpJbczqdfw/viewform?usp=sf_link) and we'd love for you to join us.\n\n**Note:** Shout out to [@Jurbanc](https://gitlab.com/jurbanc) and [@smanzuik](https://gitlab.com/smanzuik) who provided content for this section!\n{: .note}\n\n## Security assurance: from audits to automation\n\n### Achieving SOC2 compliance\nOur [Security Assurance team](/handbook/security/security-assurance/) team kicked off 2020 with the achievement our first security certification in February, a [SOC 2 Type 1 report](/handbook/security/security-assurance/security-compliance/certifications.html) based on the trust service criteria related to security. Obtaining the SOC 2 Type 1 report provides our customers with a measurable result of GitLab, Inc. and GitLab.com’s overall security posture. Additionally, the report provides insight into security and entity level controls implemented at GitLab to ensure compliance with industry standard security requirements. It also serves as attestation by an independent third-party on the effectiveness of our security controls for the proper storage and processing of client data. We blogged about our experience in this first audit in [“The benefits of transparency in a compliance audit”](/blog/benefits-of-transparency-in-compliance/).\n\n### Proactive security risk identification and mitigation\nLater in 2020 (April and May), our team formally established a [Security Operational Risk Management program (StORM)](/handbook/security/security-assurance/security-risk/storm-program/index.html) and executed our first NIST/ISO based annual security risk assessment. StORM implements a proactive approach to identifying and mitigating security risks for GitLab the company and the product. 
In building this program, the Security Assurance team identified risk factors surrounding the impact of security risks internally, to customers and to our legal and regulatory obligations. This program helps us prioritize risk mitigation activities according to the impact a security risk may have on customers and provides customers with assurance that security risks impacting the GitLab product are triaged and mitigated accordingly, based on the risk level.\n\n### Your questions, answered transparently and efficiently\nTo increase transparency and support self-serve access to GitLab’s security information and collateral, our Security Assurance team deployed the first iteration of GitLab’s [Customer Assurance Package (CAP)](/handbook/security/security-assurance/field-security/customer-assurance-package.html) in April. Like all software vendors, we routinely receive requests about the security posture of our products and services from customers and potential customers. The CAP increases our efficiency and reduces time to closure of vendor security assessments on GitLab. Our intent is to continue to grow and curate package content based on GitLab customer needs. Since deployment, the CAP has matured to version 2.0 and an internal RFP tool, [GitLab AnswerBase](/handbook/security/security-assurance/field-security/answerbase.html), has been deployed using GitLab.com to enable future package expansion through standardization and automation.\n\nWhat’s next? Our Security Assurance team has kicked off SOC 2 Type 2 and SOC 3 audits and look forward to receiving and sharing reports in Q1 2021. 
The new year will also bring a heavy focus on automating continuous control monitoring and expansion of our CAP to better meet our customers needs.\n\n**Note:** Shout out to [@mmaneval20](https://gitlab.com/mmaneval20), [@sttruong](https://gitlab.com/sttruong),   [@lcoleman](https://gitlab.com/lcoleman) [@dsharris](https://gitlab.com/dsharris) and  [@julia.lake](https://gitlab.com/Julia.Lake) who provided content for this section! \n{: .note}\n\n## Securing our product with automation, dependency scanning and bug hunting\n\n### Preventing accidental key disclosure\nThe Security Automation team, in collaboration with the GitLab Secure & Protect teams and our AWS Security counterparts, has developed functionality to identify AWS instance keys that are accidentally publicly disclosed through a repo on GitLab.com. [The new functionality](/releases/2020/11/22/gitlab-13-6-released/#support-for-post-processing-of-leaked-secrets) will alert AWS of the disclosure and the finding will appear in the security dashboard within the GitLab project. The issue of accidental key disclosure is serious and warranted action to protect our customers and community members from key compromises that could lead to significant data breaches and unexpected incurred infrastructure costs. This added functionality allows the repo owner and AWS to take action to prevent the malicious use of the disclosed key.\n\n### Package Hunter for enhanced dependency scanning \nApplications today tend to rely upon 3rd-party dependencies to enable functionality, but securing that supply chain is a difficult problem. Most existing dependency chain security tools help developers to identify dependencies known to be malicious or with known vulnerabilities. The [Security Research team](/handbook/security/threat-management/security-research/) has developed a product, called Package Hunter to identify malicious packages using dynamic behavior analysis. 
The type of malicious dependencies that Package Hunter seeks to identify are those that try to exfiltrate sensitive data, or run unintended code, such as a cryptocurrency miner. Package Hunter is still in the prototype phase, but is already running in GitLab pipelines as we work on maturing its functionality. It enhances existing dependency security tools by identifying not previously known malicious packages as part of their security testing and will help developers avoid adding malicious dependencies before merging them fully into their application.The hope is to transition Package Hunter into a product feature that all customers can use to secure their applications.\n\n### Squashing bugs and vulnerabilities\nOur [bug bounty program](https://hackerone.com/gitlab) takes a community-driven, hacker-powered approach to security and plays a crucial role in our multilayered approach to reducing risk.  2020 was a big year for this program, starting off with a bang as we hit the [million dollar bounties paid](/blog/celebrating-one-million-bug-bounties-paid/) milestone in January, followed by making our way to #6 on [HackerOne’s 2020 Top Ten Public Bug Bounties program list](https://www.hackerone.com/resources/e-book/top-10-bounty-programs-2020) in June. Throughout the year, the program received a total of 1082 reports from 508 security researchers and awarded $381K USD in bounties. Our development teams resolved 268 reports and, true to our value of transparency, we have made 133 of those reports public [(see our disclosure policy)](https://hackerone.com/gitlab/#disclosure). The success of this program and the innovative contributions from these deeply talented security researchers across the globe further secures and strengthens our product and company. 
In 2021, we’ll continue refining our processes, driving down triage and response times, and developing [initiatives focused on recognition and engagement](https://about.gitlab.com/blog/twenty-twenty-through-a-bug-bounty-lens/#bug-bounty-program-updates). You can read more about this program in this [HackerOne case study](https://www.hackerone.com/resources/gitlab/gitlabs-approach-to-security). \n\n**Note:** Shout out to [@laurence.bierner](https://gitlab.com/laurence.bierner), [@dappelt](https://gitlab.com/dappelt), [@estrike](https://gitlab.com/estrike) and [@heather](https://gitlab.com/heather) who provided content for this section! \n{: .note}\n",[720,9],{"slug":3952,"featured":6,"template":680},"how-we-made-gitlab-more-secure-in-twenty-twenty","content:en-us:blog:how-we-made-gitlab-more-secure-in-twenty-twenty.yml","How We Made Gitlab More Secure In Twenty Twenty","en-us/blog/how-we-made-gitlab-more-secure-in-twenty-twenty.yml","en-us/blog/how-we-made-gitlab-more-secure-in-twenty-twenty",{"_path":3958,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3959,"content":3965,"config":3971,"_id":3973,"_type":14,"title":3974,"_source":16,"_file":3975,"_stem":3976,"_extension":19},"/en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab",{"title":3960,"description":3961,"ogTitle":3960,"ogDescription":3961,"noIndex":6,"ogImage":3962,"ogUrl":3963,"ogSiteName":667,"ogType":668,"canonicalUrls":3963,"schema":3964},"How we optimized infrastructure spend at GitLab","We keep our cloud spend under control with a spend optimization framework – now we're sharing it with you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681701/Blog/Hero%20Images/piggy_bank.jpg","https://about.gitlab.com/blog/how-we-optimized-our-infrastructure-spend-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we optimized infrastructure spend at GitLab\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Davis Townsend\"}],\n        \"datePublished\": \"2020-10-27\",\n      }",{"title":3960,"description":3961,"authors":3966,"heroImage":3962,"date":3968,"body":3969,"category":743,"tags":3970},[3967],"Davis Townsend","2020-10-27","\n\nInfrastructure spend optimization is a hot topic these days as many established companies are migrating workloads to the cloud. Similarly,  fast-growing startups are struggling to control their operating costs as they expand their cloud footprint to meet user demand. \n\nAt GitLab we have taken a methodical and data-driven approach to the problem so we can reduce our cloud spend and control our operating costs, while still creating great features for our customers. We designed a five-stage framework which emphasizes building awareness of our infrastructure spend to the point where any change in costs is well understood and no longer a surprise.\n\nOur framework is very similar to a normal data maturity framework (shown below) that would progress through descriptive, predictive, and finally prescriptive analytics, but we tailor it specifically for this domain. I'll explain each stage and what it looks like at GitLab so you can see how you might apply it to your own organization.\n\n![Normal Data Maturity Framework](https://about.gitlab.com/images/blogimages/2020-10-28-How-We-Optimized-Infra-spend/DMM.jpeg \"Normal Data Maturity Framework\"){: .medium.center}\nA normal data maturity framework \n{: .note.text-center}\n\n## Spend optimization framework\n\n## 1. Basic cost visibility\n This stage can be thought of as data exploration. You just want to understand as much as you can about where you are spending money at a high level. What vendors and services are you spending the most money on? This data is generally provided by cloud vendors through a billing console, as well as through billing exports. 
I've found the way to get the best use out of both options is to use the provided billing console for answering simple questions about specific costs quickly, and the exports for integrating this data into your own analytics architecture for more granular reporting, [multicloud](/topics/multicloud/) reporting, or for specific recurring reports you need over a longer time horizon.\n \n### GitLab example\nWhen starting out, we looked at Google Cloud Platform (GCP) and their [Default Billing Export](https://cloud.google.com/billing/docs/how-to/export-data-bigquery) to get an overview of which products/projects/SKUs were responsible for the majority of our spend.\n\n## 2. Cost allocation\nThis stage is all about going from high-level areas of spend to more granular dimensions that tie back to relevant business metrics in your company. At GitLab we may want to look at what we spend on particular services like CI runners, or what we spend to support employees using GitLab.com as part of their job vs. customer spend. This data may not be readily available to you so there could be a lot of work involved to tie these sorts of relevant business dimensions back to the cost reports provided by your vendor.\n\n### GitLab example\nFor our production architecture we had some [GCP labels](https://cloud.google.com/compute/docs/labeling-resources) that indicated the internal service applied to the majority of our instances, so we started with those to see which services we spent most of our money on. More recently, we have created a [handbook page for Infrastructure Standards](/handbook/infrastructure-standards/) around project creation and label naming so that we can get even more insight out of our bill.\n\n\n\n## 3. 
Optimize usage efficiency\nOnce you can allocate costs to their relevant business metrics, then can you start to ask interesting questions such as, “Why is our storage spend so high on feature x?” By asking these questions and then talking with the subject matter experts about these potential areas of optimization you can start to come up with ideas to reduce some of this cost.\n\n### GitLab example\nWhen we reached this stage we began to identify many areas of opportunity, including:\n\n- [CI runners](https://gitlab.com/gitlab-org/gitlab/-/issues/35777): One of the areas discovered from stage 2 happened to be our CI runners, for which we created more granular reporting to see the cost by specific repos, pipelines, and jobs, which allowed us to find some ways to optimize our own internal use of CI.\n- [Object storage](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10087): We discovered high storage costs for outdated Postgres backups. We resolved this by enabling bucket lifecycle policies and reduced our object storage for that bucket by 900TB.\n- [Network usage](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10222): By correlating a recent change in our spend profile to a network architecture change, we were able to highlight the need for additional changes. We ultimately implemented a change to directly download runner artifacts from GCS instead of having the traffic be proxied. This significantly reduced our overall networking cost.\n\n## 4. Measure business outcomes vs spend\n\nWhen you get to a point for a particular area where you feel like you have done all the basic optimizations and aren't sure where else you could reduce cost without seriously impacting your employees or customers, you have reached stage 4. 
This stage is all about analyzing the value of more complex changes that could reduce spend at the expense of something else, as well as considering the value and cost impact of major feature or architectural changes in the future.\n\n### GitLab example\nOur best example of this was our recent rollout of [advanced global search](https://docs.gitlab.com/ee/user/search/advanced_search.html) to all paid users on GitLab.com. In the first iterations of testing for this feature our costs were exceptionally high. Through a lot of hard work by the team responsible for the feature, they were able to significantly bring down the costs while improving functionality. Through those efforts, GitLab was able to bring this great feature to the platform in a way that also made sense from a business perspective.\n\n## 5. Predict future spend and problem areas\nOnce your company has matured the practices above, you can start to become proactive about observing cost. You can also begin to detect and alert when spend is outside expected thresholds. Once you get to this point, infrastructure optimization should become a boring topic, and when you no longer have any cases of huge unexpected cost increases that were not due to unexpected increases in customer demand, you know you are doing a great job.\n\n### GitLab example\n\nWe’re still working on this stage ourselves. While we’ve had some success in detecting unexpected spend, and even tying it to anomalous behavior in our platform, we recognize we have much more to do here. We are still working to get most of our usage to Stages 3-4, while spending parallel effort to reach Stage 5 for some more mature workloads.\n\n## Current state and next steps\nToday at GitLab, depending on the workload, we are anywhere between stages 1-4. The bulk of the work is going into getting everything to at least stage 2, and from there we can work on getting everything to stages 3-4. 
Current efforts include applying our newly created [infrastructure standards](/handbook/infrastructure-standards/) across all of our infrastructure, bringing in relevant product usage data from our various services, and giving PMs the tools they need to better manage the cost of their services through a single source of truth of base level cost metrics.\n\n## Workflow and planning\nCost optimization is a difficult topic to tackle effectively as it involves many different stakeholders across the business who all have their own priorities. The way we are taking this problem on at GitLab is we have an [issue board](https://gitlab.com/groups/gitlab-com/-/boards/1502173?label_name[]=infrafin) where we plan and track progress on issues related to infrastructure spend. For all the major initiatives we assign priority to these based on four factors:\n\n1.  Cost savings\n2.  Customer impact  \n3.  Future potential cost impact\n4.  Effort required\n  \nThese factors are discussed and reviewed by our analyst, our SaaS offering product manager, and the relevant subject matter expert for the area. Once the priority is agreed upon, the product manager works with various product teams to get these scheduled into milestones or backlog queues for the teams that need to implement the changes. Progress is tracked on the issue board, and reviewed for priority to ensure the solution moves forward at an appropriate velocity.\n\n## More to read\n\nAll of this info and more can be found in our [Cost Management Handbook](/handbook/engineering/infrastructure/cost-management/). 
We continue to improve this page to provide our own employees with the resources they need to understand this topic better, as well as providing external viewers some idea of how they could think about infrastructure optimization in their own company.\n\nYou might also enjoy:\n* [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n* [How we migrated application servers from Unicorn to Puma](/blog/migrating-to-puma-on-gitlab/)\n* [How we upgraded PostgreSQL at GitLab.com](/blog/gitlab-pg-upgrade/)\n\nCover image by [Fabian Blank](https://unsplash.com/@blankerwahnsinn?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1296,9,811,2749],{"slug":3972,"featured":6,"template":680},"how-we-optimized-our-infrastructure-spend-at-gitlab","content:en-us:blog:how-we-optimized-our-infrastructure-spend-at-gitlab.yml","How We Optimized Our Infrastructure Spend At Gitlab","en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab.yml","en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab",{"_path":3978,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3979,"content":3985,"config":3991,"_id":3993,"_type":14,"title":3994,"_source":16,"_file":3995,"_stem":3996,"_extension":19},"/en-us/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories",{"title":3980,"description":3981,"ogTitle":3980,"ogDescription":3981,"noIndex":6,"ogImage":3982,"ogUrl":3983,"ogSiteName":667,"ogType":668,"canonicalUrls":3983,"schema":3984},"How we prevented security fixes leaking into our public repositories","Working in the open makes it difficult to work on security vulnerabilities before they're disclosed, especially when that openness discloses them 
early!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667227/Blog/Hero%20Images/security-leaks-unlocked.jpg","https://about.gitlab.com/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we prevented security fixes leaking into our public repositories\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Speicher\"}],\n        \"datePublished\": \"2021-01-04\",\n      }",{"title":3980,"description":3981,"authors":3986,"heroImage":3982,"date":3988,"body":3989,"category":743,"tags":3990},[3987],"Robert Speicher","2021-01-04","One of GitLab's core values is \"[public by default][],\" which means we develop in\nthe open whenever possible. One notable exception to this is security fixes,\nbecause developing security fixes in public discloses vulnerabilities before a\nfix is available, exposing ourselves and our users to attacks.\n\nIn order to work on these security issues in private, public GitLab projects\nhave a security mirror that's accessible only to GitLab engineers. A design flaw in GitLab's mirroring feature would cause commits from the\nSecurity repository to be exposed in the public repository before they were\nintended for release.\n\nIn this post we'll describe what the problem was and how we finally resolved it.\n\n[public by default]: https://handbook.gitlab.com/handbook/values/#public-by-default\n\n## Mirroring setup\n\nTo ensure that developers working on a security fix are working against the\nlatest code for a project, we utilize GitLab's [push mirror](https://docs.gitlab.com/ee/user/project/repository/mirror/index.html) feature to mirror\nthe public (\"Canonical\") repository to its private Security fork.\n\nOn every commit to the Canonical repository, the Security repository receives\nthe same commit. 
All of the mirroring is performed by the [Gitaly][gitaly]\nserver, which handles all of the Git calls made by GitLab.\n\nIn order to know which Git objects in the source are missing on the destination,\nGitLab would [fetch the remote][] and then tell Gitaly to perform the push that\nwould bring the two in sync, which is where the trouble starts.\n\nBy performing a fetch, _every Git object in the Security repository was now\nknown and stored on-disk by the Canonical repository_. If someone knew the SHA\nof a commit in the _private_ repository that contained a security fix, they\ncould view it in the _public_ repository and discover the vulnerability we were\nfixing before it had been publicly disclosed.\n\n[push mirror]: https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html\n[gitaly]: https://gitlab.com/gitlab-org/gitaly\n[fetch the remote]: https://gitlab.com/gitlab-org/gitlab/blob/f5bfe5603137b8f9cf60a2db759db3dbe5c60727/app/services/projects/update_remote_mirror_service.rb#L30\n\n## No guessing necessary\n\nThankfully, even a truncated Git commit SHA is difficult to guess, so at first\nglance this might not look like a high-severity issue.\n\nHowever, the [GitLab help page](https://gitlab.com/help) shows exactly which\ncommit is currently running, and we always deploy security fixes to GitLab.com\nfor verification and to protect our users against the latest threats. 
Here's\nwhat that might look like:\n\n> ### GitLab Enterprise Edition 13.7.0-pre [690e4bbfe94][]\n\nWhen a security release was in progress, any logged-in user could click on the\nrunning commit SHA and view the entire [source code](/solutions/source-code-management/) tree at that point, security\nfixes included!\n\n[690e4bbfe94]: https://gitlab.com/gitlab-org/gitlab/-/commits/690e4bbfe94\n\n## Experimenting with a fix\n\nThe mirroring setup was a crucial part of our development and release process,\nand the existing fetch-based behavior was itself a crucial piece of what made\nthe mirroring functionality work. During our initial investigation, there was no\nobvious fix. One proposed workaround was to simply remove the SHA from the Help\npage, but that would only hide the problem and \"security through obscurity\"\nisn't really security at all.\n\nAnother workaround, which we [ended up implementing][mirror pause], was to\npause the mirroring as soon as a security fix was merged, and re-enable it\nonce the security release was published. This prevented the leak because the\nfetch was no longer happening, but it would \"stop the world\" while we worked\non a security release. The Security mirror quickly fell behind public\ndevelopment, which created a risk of new features causing merge conflicts\nwith the security fixes, or vice versa.\n\nStaff engineer [Jacob Vosmaer][], who began the Gitaly project within GitLab,\n[pointed out][] that, strangely, we only used this fetch-based behavior for\nbranches; tags used Git's low-level [`ls-remote` command][ls-remote].\n\nWhereas Git's `fetch` command creates a local copy of every object from the\nremote repository, the `ls-remote` command only prints the remote's available\nreferences to the terminal. 
If we used `ls-remote` for branches like we did for tags, the commits from\nthe mirror would no longer be persisted on-disk, and thus wouldn't be\navailable in the public repository.\n\nBecause push mirroring is such a critical part of our own workflow as well as\nour users', we didn't want to just make the change and hope for the best. We\n[set up an experiment][], where the old functionality stayed exactly as it was,\nbut when a [feature flag][] was enabled, we'd also gather the same commit\ninformation using `ls-remote`, and compare the new results to the original,\nlogging any differences.\n\nThe experiment ran on GitLab.com for about a month without major discrepancies.\nIt looked like we had a solution!\n\n[mirror pause]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/626\n[Jacob Vosmaer]: /company/team/#jacobvosmaer-gitlab\n[pointed out]: https://gitlab.com/gitlab-org/gitlab/-/issues/38386#note_312363006\n[ls-remote]: https://git-scm.com/docs/git-ls-remote.html\n[set up an experiment]: https://gitlab.com/gitlab-org/gitaly/-/issues/2670\n[feature flag]: https://docs.gitlab.com/ee/operations/feature_flags.html\n\n## Iterating on the experiment\n\nConsidering the experiment a success, but still being wary of breaking a key\npiece of functionality, we proceeded with caution. Rather than replacing the old\nbehavior outright with the new, we [split the two paths based on a feature\nflag][split].\n\nWhen the flag was disabled the old, tried-and-true behavior would be used. With\nthe flag enabled, we'd use the new. We shipped this change and left the flag\nenabled, watching for errors.\n\nAfter two weeks without any reported mirroring errors, and with the security\nleak no longer occurring, we were satisfied we had found our fix.\n\nFirst we shipped a self-managed release [with the feature flag enabled by\ndefault][flag enabled], to ensure that if something unexpectedly broke for those\ninstallations it would be easy to revert to the previous behavior. 
Finally, after no errors reported from self-managed users, we [removed the\nfeature flag along with the old behavior][flag removal], and closed out the\nconfidential issue.\n\n[split]: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/2183\n[flag enabled]: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/2330\n[flag removal]: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/2417\n\n## An annoying bug emerges\n\nShortly after making the new behavior the default, we started getting\n[complaints from team members][complaints]. They'd receive an automated email\ntelling them that a push mirror was broken, only to go check on the mirror and\nbe told everything was fine.\n\nThis went on for about two months due to the transient nature of the errors.\nEvery time we'd get an email and check to see if it was accurate, the mirroring\nreported everything was fine.\n\nAs we began to implement [a new piece of tooling][new tooling] that depended on\naccurate status reporting from push mirroring, the problem became bigger than a\nfew annoying, seemingly inaccurate emails; it was causing our tooling to behave\nerratically as well.\n\nBecause we had absolutely no idea what was happening or why, our first step was\nto [add logging][] when Gitaly was encountering an error that would mark the\nmirror as failed. The logging [revealed a weird anomaly][anomaly] where it\nappeared that the Security repository – the one _receiving_ updates – appeared\nto be _ahead_ of its source:\n\n```\nI, [2020-09-21T10:10:31] Divergent ref due to ancestry -- remote:f73bb2388a6, local:59812e04368\nI, [2020-09-21T10:26:39] Divergent ref due to ancestry -- remote:8ddcb3333da, local:f73bb2388a6\n```\n\nIn this pair, the first message is saying that the remote – the Security\nrepository – was showing its latest commit as `f73bb2388a6`, and that it wasn't\nan ancestor of the local `59812e04368` commit, causing the error message. 
On the\nnext run, we see that the local repository has \"caught up\" to the Security\nremote from the prior run.\n\nIt turned out that due to the number of branches and tags in this repository,\nthe `ls-remote` command was taking so long to complete that by the time the data\nwas returned, the local repository was updated by a new push.\n\nBecause we gathered the remote refs after the local ones, a network delay\ncreated a window for new local commits to be written and invalidate our list\nof local refs. Luckily there was a nice [boring solution][]: all we had to do\nwas [swap the order][] in which we gather references.\n\n[complaints]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/914\n[new tooling]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/1111\n[add logging]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/914#note_413855603\n[anomaly]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/914#note_416246505\n[boring solution]: https://handbook.gitlab.com/handbook/values/#boring-solutions\n[swap the order]: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/2606\n\n## Wrapping up\n\nAs soon as we swapped the order for gathering references, the transient errors\nwent away and we finally got to close this long-standing issue. 
We were pleased\nwith how we were able to modify such a critical piece of functionality safely\nand without any negative user impact.\n\n## Related issues\n\n- [Security commits available on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/426)\n- [Do not expose GitLab version on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/issues/38386)\n- [Populate remote branches in-memory via `ls-remote` rather than using `fetch`](https://gitlab.com/gitlab-org/gitaly/-/issues/2670)\n- [Transient push mirror divergence errors](https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/914)\n\nPhoto by [iMattSmart](https://unsplash.com/@imattsmart?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/broken-lock?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText)\n{: .note}\n",[9,720],{"slug":3992,"featured":6,"template":680},"how-we-prevented-security-fixes-leaking-into-our-public-repositories","content:en-us:blog:how-we-prevented-security-fixes-leaking-into-our-public-repositories.yml","How We Prevented Security Fixes Leaking Into Our Public Repositories","en-us/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories.yml","en-us/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories",{"_path":3998,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3999,"content":4005,"config":4010,"_id":4012,"_type":14,"title":4013,"_source":16,"_file":4014,"_stem":4015,"_extension":19},"/en-us/blog/how-we-release-software-patches",{"title":4000,"description":4001,"ogTitle":4000,"ogDescription":4001,"noIndex":6,"ogImage":4002,"ogUrl":4003,"ogSiteName":667,"ogType":668,"canonicalUrls":4003,"schema":4004},"Inside GitLab: How we release software patches","At GitLab, we tackle software patches in two ways – hands on and automatically. 
Learn how the release manager works to create and deliver essential fixes with auto-deploy releases on GitLab.com and patch releases for self-managed users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672273/Blog/Hero%20Images/patch.jpg","https://about.gitlab.com/blog/how-we-release-software-patches","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside GitLab: How we release software patches\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-05-13\",\n      }",{"title":4000,"description":4001,"authors":4006,"heroImage":4002,"date":4007,"body":4008,"category":743,"tags":4009},[672],"2020-05-13","\n\nYou can set your smartwatch by it: On the 22nd of every month, GitLab self-managed users can expect to see an update for the latest version of our self-managed product. In our monthly release, you might find new product features, iterations on existing features, and oftentimes you’ll see the end-result of requests for tooling or merge requests submitted by the community.\n\nBut just as in life, rarely is software development perfect. When a bug or security vulnerability surfaces, the release manager on the Delivery team will have to create a patch release for our self-managed customers. GitLab.com is continuously updated through the continuous delivery process. We call this CD process auto-deployments to avoid ambiguity with GitLab CD features. The auto-deploy process might include suggestions from merge requests submitted by users, customers, and our internal development team. 
So at GitLab, tackling the pesky problem of releasing software patches is solved in two very different ways.\n\n\"We are ensuring daily that everything built by developers is deployed on all environments prior to deploying to GitLab.com,\" explains [Marin Jankovski](/company/team/#marin), senior engineering manager, Infrastructure. \"You can think of a self-managed release as a snapshot of a GitLab.com deployment, with additional actions taken to ensure that our customers can use the same package for their own self-managed installations.\"\n\nRegardless of the origin of the bug or vulnerability, GitLab.com customers will receive the fix shortly after it has been created, which is a benefit of an automated CD process. The fixes for self-managed customers require specific preparation by the release manager.\n\nThe Delivery team works hard to automate more of the processes involved in creating a release to reduce the [mean time to production (MTTP)](/handbook/engineering/infrastructure/performance-indicators/#mean-time-to-production-mttp), which refers to the amount of time between when a developer merges a merge request to when it is deployed to GitLab.com.\n\n\"The whole mission of the Delivery Team is making sure that we can deliver faster as a company or at least enabling people to deliver faster, right?\" says Marin.\n\nBoth our self-managed and GitLab.com customers benefit from the Delivery team’s efforts to reduce cycle time and speed up deployments. 
In this blog post, we explain the similarities and differences between these two types of [GitLab releases](/handbook/engineering/releases/), and how the Delivery team prepares a patch release for our self-managed GitLab users and how they ensure that GitLab.com is always current using auto-deployments.\n\n## What does a release manager do?\n\nMembers of the GitLab Delivery team [rotate the responsibilities of being a release manager](/community/release-managers/) for our monthly self-managed releases, as well as the patch and security releases that might be shipped in-between. They are also responsible for efforts to migrate the company to automated, continuous deployments.\n\nOur self-managed releases and our GitLab.com releases use similar workflows and technology, but operate on different [timelines](/handbook/engineering/releases/#timelines), Marin explains.\n\nThe main priority for the release manager, regardless of the release type, is ensuring that GitLab stays available and secure since the application runs on GitLab.com, ensuring that the same issues do not trickle down to self-managed customer's infrastructure.\n\nWhen a bug or security vulnerability is reported fixed in GitLab, it is up to the release manager to evaluate whether or not it merits a patch or security release for our self-managed users. If the release manager decides the bug or vulnerability merits an update, they will start the preparation work.\n\nThe release manager has to decide whether or not to prepare a patch release or when to deploy it, and that largely depends on the context of the situation: \"And for now machines are not as good dealing with the context as humans are,\" says Marin.\n\n## All about patch releases\n\n### What is a patch release and why do we need them?\n\nThe release manager decides whether or not to issue a patch release based on the [severity of the bug being reported](/handbook/engineering/quality/issue-triage/#sts=Severity). 
The bugs are ranked based upon their severity – an S4 or S3 bug may be stylistic, such as a pixel or icon that is off tilt. It’s no less important, but it is less likely to impact someone’s workflow, and so it is unlikely that a patch release will be created just to fix an S4 or S3 vulnerability, Marin explains. Whereas an S1 or S2 vulnerability means a user may be prevented from upgrading to the newest version or there is a significant error impacting a user’s workflow. If an S1 or S2 bug is reported then that means a lot of people are likely experiencing it, so the release manager begins to prepare the patch release straightaway.\n\nOnce the fix is ready for an S1 or S2 vulnerability, the release manager will start the patch release. For example, the [GitLab 12.10.1 patch release](/releases/2020/04/24/gitlab-12-10-1-released/) was created after a few blocker issues were identified and developers fixed the underlying problem. The release manager estimated whether the assigned severities were correct, and after confirming, the patch release process was initiated and released within 24 hours of the blockers being identified.\n\nWhen the queue of S4s, S3s, and S2s starts to grow the release manager will look at the context to determine the urgency of the patch release. When the bugs start to pile up, the release manager will bundle the items together and ship them. A [patch or security release blog post](/releases/2020/04/24/gitlab-12-10-1-released/) summarizes the various fixes and updates that are pushed out to users in the form of patch or security releases.\n\n### How does the release manager create a patch release?\n\nWe use GitLab CI and various other GitLab features such as our ChatOps function to create GitLab patch release. 
The release manager will start the patch release by triggering the ChatOps command in our internal `#releases` channel in Slack.\n\n`/chatops run release prepare 12.10.1`\n\nThe ChatOps function works within Slack to trigger various events that GitLab then picks up and executes. For example, the Delivery team set-up ChatOps to automate a number of action items for the patch release, such as preparing the [relevant patch release issues](https://gitlab.com/gitlab-org/release/tasks/-/issues/1305), actionable items within the release, and so on.\n\nOnce the release manager triggers the ChatOps command using Slack, the rest of the process is automated within GitLab using our [CI/CD functions](/features/continuous-integration/). There is a lot of back-and-forth between ChatOps in Slack and GitLab throughout the release process as the release manager triggers some of the core steps in the process.\n\nWatch the video below for an in-depth look at the technical process behind preparing a patch release for GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/lHag9jARbIg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## Inside auto-deployments on GitLab.com\n\n### How do releases work on GitLab.com?\n\nThe process and tools used to update GitLab.com are similar to those used for creating a patch release. Updating GitLab.com requires less manual actions from the release manager.\n\nInstead of using ChatOps to trigger the deployment, we use CI features such as [scheduled pipelines](https://docs.gitlab.com/ee/ci/pipelines/schedules.html#working-with-scheduled-pipelines) which allow the release manager to schedule certain actions to happen at a particular time. 
Instead of a manual process, there is a pipeline every hour which checks for any new changes to GitLab projects, the changes are automatically pulled in, packaging and deployment scheduled, and automatically runs the QA testing and other required steps.\n\n\"So you have a lot of deployments happening on all of the different environments, before GitLab.com. And then once all these environments are in a good state and testing shows good results, the release manager takes an action to promote a deployment on GitLab.com,\" says Marin.\n\nThe CI/CD technology that powers updates to GitLab.com automates the release process, up to the point where a release manager will have to manually trigger deployment to the production environment for GitLab.com.\n\nMarin takes a deep dive into the process behind creating an update to GitLab.com in the video below. Watch to learn more about the process behind issuing an auto-deploy release.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/_G-EWRpCAz4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What’s next for the Delivery team\n\nThe main difference between auto-deploy releases on GitLab.com and patch releases for self-managed customers is that the latter process is longer and requires more manual action on the part of the release manager.\n\n\"Sometimes we are delayed with creating releases for our self-managed customers because of the handover issues, because of the tooling issues, because of the too many variables that go into producing a single release,\" says Marin.\n\nOne of the short-term [goals for the Delivery team](/handbook/engineering/infrastructure/team/delivery/#vision) is to reduce the amount of manual intervention required on the part of the release manager to [increase release velocity](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/170). 
The team is working to simplify, streamline, and automate the release process, which will help turn around lower-tier severity fixes faster. The focus on speed is indicated by the core key performance indicator: Reduce the MTTP – the time it takes for a merge request to deploy to GitLab.com – from its current 50 hours to eight hours.\n\nThe Delivery team is also working to drive the changes necessary to shift [GitLab.com to a Kubernetes-based infrastructure](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/112). These are two different approaches that share the same goal: Shipping faster on GitLab.com and for self-managed customers.\n\n## Have ideas for us?\n\nEveryone can contribute to GitLab, and we welcome feedback from our readers. If you have ideas for the Delivery team, feel empowered to [create an issue and attach the label `team: Delivery`](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=team%3A%3ADelivery).\n\nCover photo by [Kyle Hinkson](https://unsplash.com/@kajhinkson?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/@kajhinkson?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note.text-center}\n",[9],{"slug":4011,"featured":6,"template":680},"how-we-release-software-patches","content:en-us:blog:how-we-release-software-patches.yml","How We Release Software Patches","en-us/blog/how-we-release-software-patches.yml","en-us/blog/how-we-release-software-patches",{"_path":4017,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4018,"content":4024,"config":4029,"_id":4031,"_type":14,"title":4032,"_source":16,"_file":4033,"_stem":4034,"_extension":19},"/en-us/blog/how-we-scaled-our-summits",{"title":4019,"description":4020,"ogTitle":4019,"ogDescription":4020,"noIndex":6,"ogImage":4021,"ogUrl":4022,"ogSiteName":667,"ogType":668,"canonicalUrls":4022,"schema":4023},"How we double the GitLab summit every year","Take a deep dive into 
the evolution of our summit, GitLab Contribute, keeping pace with a company that practically doubles in size annually.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673134/Blog/Hero%20Images/scale-our-summits.jpg","https://about.gitlab.com/blog/how-we-scaled-our-summits","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we double the GitLab summit every year\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-09-02\",\n      }",{"title":4019,"description":4020,"authors":4025,"heroImage":4021,"date":4026,"body":4027,"category":808,"tags":4028},[672],"2019-09-02","\nSince fewer than 10 GitLabbers convened in [Serbia for the first summit in 2013](/company/culture/contribute/previous/#summit-in-novi-sad-serbia), the annual meeting has nearly [doubled in size](/company/culture/contribute/previous/#back-in-the-day) each year, in tandem with the growth of our company. GitLab is projected to grow from a team of more than 830 today to more than 1,200 by 2020. The attendance list is getting (much) larger and the logistics more complex. We dive into the nuts and bolts of how our summit scales along with our community.\n\n## What is GitLab Contribute?\n\nFirst, let’s start with what it’s not: [Contribute](/events/gitlab-contribute/) is not an incentive trip, it’s not a conference, and it’s not a vacation. Contribute isn’t mandatory, but it is a unique opportunity to bring the minds that power GitLab together in one place. In 2019 the annual summit was renamed to “Contribute” to better reflect the intention of the experience: to build community with our colleagues and get some work done!\n\nBut just like Rome (which, unfortunately, is not a feasible host city for 2020), Contribute isn’t built in a day. 
There are numerous considerations that go into creating a successful program, including the location, logistics, program, and creating opportunities for team members to connect.\n\n## Start with the numbers\n\nThe planning process always starts with the most crucial number: the projected attendee list. We had roughly 575 folks (including some significant others) attend Contribute 2019, which is consistent with the 86% attendance rate we’ve seen with past summits, and is about double the number of attendees for our [Cape Town summit](/blog/gitlab-summit-cape-town-recap/).\n\nOur attendance projections for 2020 are double the 2019 numbers at about 1,186. That number of attendees necessitates moving into a different caliber of hotel that includes enough rooms, meeting spaces, and amenities to host more than 1,200 people at a time in a single place.\n\n## Logistics\n\n“The biggest challenge is keeping everybody together,” says [Kirsten Abma](/company/team/#kirstenabma), our corporate events manager. “For Contribute 2019 it would have been pretty doable to keep everybody together in one place, even if we hadn’t gone to New Orleans, but it’s getting trickier for 2020 and it’s going to get even trickier in 2021.”\n\nWhen evaluating a destination, Kirsten lists a few key considerations that narrow down the options:\n\n*   **Cost of transportation:** How much does it cost to bring our globally distributed team to one location?\n*   **Visas:** Is it simple for most community members to get a visa for this destination?\n*   **Number of hotel rooms:** Does the hotel have enough single and double rooms to meet our baseline requirements?\n*   **Meeting spaces:** Can we fit 1,200 people in this ballroom? 
Are there enough breakout rooms for the number of concurrent sessions?\n\nWorldwide, there are very few locations with hotels large enough to accommodate such a big group, and (sadly) popular vacation destinations and old-world cities are generally excluded from the list for this reason.\n\n“Paris, for example, is a great place to go – for excursions there are so many options. It’s really accessible too; there are a lot of flights going to Paris, Europeans can even take trains or drive. But then all the hotels cap at 500 rooms, and we’re asking for 1,000 rooms and for Contribute 2021 we need more than 1,600, so it’s not going to work.” In other words, GitLab won't always have Paris, unfortunately.\n\nOther factors that go into selecting a meeting space include:\n\n*   **Catering:** Can 1,000 people all dine within 1-2 hours? Can the hotel accommodate dietary restrictions?\n*   **Amenities**: Does the hotel have a restaurant, a gym, and a bar?\n*   **Lastly, the vibe**: Is the hotel crisp and clean? Does it have air conditioning? How do the beds feel? Does the hotel layout make sense?\n\nIn New Orleans, there were three hotels that were contenders, but the Hyatt, where [Contribute 2019](/blog/contribute-wrap-up/) was hosted, won out.\n\n“We walked into the Hyatt. We went up the escalators and we looked around and we knew ‘Yeah this is it,’” says Kirsten. “We instantly knew this was a great vibe. We knew that if it could fit our budget then it was our number one choice.”\n\n## Implementation\n\nThere are about 25 locations that are potential contenders for Contribute 2020 and 2021 based on our attendee list and other projections.\n\nThe corporate events team works with an agent that brokers contracts with the hotels so GitLab gets the most favorable deal possible. The first step is to send our request for proposals (RFP) to hotels in the locations we are considering. 
This helps us get the specs on different hotels and can help us whittle down our list to a few locations. Once it's down to between 3-5 locations, the events team begins site visits.\n\nThe next step is to reach out to local partners in those locations. Finding a good [destination management company](https://en.wikipedia.org/wiki/Destination_management) (DMC) is crucial to running a smooth event. The DMC has existing relationships with local vendors and can help broker deals on everything from airport transportation, to finding the best excursions, to even the tiniest details that add texture to the whole experience.\n\n“We always try to stay local and really show off the place we’re visiting, its history, things that are significant to that location,” says Kirsten. “These DMCs know everything about everyone and all the local vendors. So when we said we want glow-in-the-dark cotton candy for our masquerade ball in New Orleans, we got it.”\n\nYou have to be nimble in order to be a good event planner, and our events team often changes things up at the last minute. Some partners have difficulty adapting to how quickly we update our events to suit the particular circumstances (H/T to Meg Baird with [NOLA DMC](https://www.noladmc.com) who really pulled through on these things).\n\n“Our partners have to get used to the speed that we work at, because GitLab moves fast and so does our team. There are some venues that are like, ‘What? You mean tomorrow? 
No!’ Then we’re like ‘Yeah, let’s do this!’” We are literally changing up everything pretty much every week.”\n\n## Activities\n\nCreating a program to keep more than 1,000 people occupied for 4-5 days is one of the biggest challenges of scaling up the Contribute program.\n\n“I think one of the biggest evolutions is that previously we had everybody in the same session, or had broken it up into three or four sessions but the bigger you get the harder that becomes.”\n\n### Unconference sessions\n\nThe unconference sessions were piloted during the 2018 Cape Town summit, and were formalized into the Contribute program in 2019 because they received such a positive reception from attendees. The unconference sessions offer a break from work-related activities and allow team members to connect through games and shared interests. Many of the sessions bore tangible results, such as building a [blog post](/blog/day-in-the-life-remote-worker/) through a game of broken telephone to organizing more [volunteer opportunities](https://gitlab.com/gitlab-com/www-gitlab-com/issues/4437) for GitLab team members.\n\n### Workshops\n\nFormal workshops were introduced this year as a platform for knowledge transfer and exchange. Through these workshops, folks can learn more from their colleagues about different topics they are highly skilled at or use on a daily basis, such as [GitLab 101](https://docs.google.com/presentation/d/e/2PACX-1vTeGh5vq4yHk6NgzTDsKRGbf-NDwzQwRfjnr7jwqfce282h5k4C_xRGUOE1WWwxsj9rEg8Z5UGNT6aj/pub?start=false&loop=false&delayms=3000). Other workshops centered around implementing GitLab’s values – in a packed workshop about recognizing and mitigating unconscious bias we made improvements to the GitLab handbook.\n\n“There will be a lot more to choose from going forward and I think that’s a great change for the program as well,” says Kirsten. 
“There will basically be something for anyone at any time of day during Contribute.”\n\nFor Contribute 2020 and onward, we are going to introduce different types of sessions such as AMAs, team building, panels, and additional fireside chats.\n\n## Connection\n\nAt an all-remote company, the opportunity to get to know people in person is huge and often makes remote collaboration a bit easier. Attendees of Contribute 2019 reflect this sentiment in feedback shared with the corporate events team:\n\n>“In a fully remote company, the opportunity to meet people in person reinforces and deepens the relationship between the company in ways that are invaluable.”\n\n>“Face-to-face time is incredibly valuable in building strong working relationships.”\n\nJust a quick glance at a colleague's [contributions graph](/blog/how-do-you-contribute/) illustrates the depth of collaboration at GitLab, but the kinetic energy that propels these contributions is inspiring when we're all under one roof.\n\n### Getting new hires to Contribute\n\n“It’s really hard to imagine the size of GitLab, the speed that we work at, and the way that we work together if you haven’t seen everybody together,” says Kirsten. This is why the company decided it’s so important that we do everything possible to bring even our newest team members to Contribute.\n\nAt Contribute 2019, there was a group of 60-70 people who essentially signed their contracts and hopped on a plane to New Orleans, and even more who started maybe a week before the annual event kicked off.\n\nIn spite of it being a surreal first week, new team members largely felt the experience was more positive than disorienting: “As a completely new hire it was a great way to initially meet all the people I was going to be working with moving forward.”\n\n“That’s why we push for people to literally have their first day during these events because it really builds a stronger working relationship,” says Kirsten. 
“We don’t want people to miss out on that feeling for nine months or a year.”\n\nThe events team deliberately created more opportunities for team bonding with department happy hours and team dinners in New Orleans, and will continue to create more team-building events based on a number of requests that this practice continue.\n\n## Iterating our way to 2020\n\nThe motto for the corporate events team is live and learn, says Kirsten. Every year we discover new things that can make implementing the event easier from behind-the-scenes (e.g. booking the ballroom for two days to prepare for the keynote) and a better, more engaging experience for participants (e.g. including a break in the middle of the keynote so folks can stretch their legs).\n\nBased on feedback from a post-Contribute 2019 survey, Kirsten also plans on creating more “unplanned planned” events, such as karaoke or game nights in breakout rooms – activities that were always a feature of past summits but were usually self-organized among participants. But feedback from 2019 did show us that in bigger groups people want to know what they’re in for ahead of time.\n\nThere were also requests to gamify socialization or randomize seating at meal times so there are more opportunities for community members to meet and connect.\n\nWhen your baseline doubles each year, you’re going to have some growing pains while scaling such a huge, complicated event. But for Kirsten, seeing happy GitLabbers bounce through the hotel doors on arrivals day and overhearing the inevitable, “I didn’t know you were so tall!” at breakfast makes the effort worthwhile.\n\n“When the keynote kicks off on day 1 of Contribute, you can see everybody in one space for the first time since the last Contribute 9-12 months ago. I was standing near the doors during that moment in the keynote when all the doors closed, and I just looked around. 
Every time I see that I get goosebumps because it’s like, ‘Oh my goodness, this is so many more people than I had imagined it would look like!’”\n\nIf you're wondering where we'll go next, it's still a surprise but [keep an eye on our save the date](/events/gitlab-contribute/index.html) because the announcement should happen soon!\n",[9,832],{"slug":4030,"featured":6,"template":680},"how-we-scaled-our-summits","content:en-us:blog:how-we-scaled-our-summits.yml","How We Scaled Our Summits","en-us/blog/how-we-scaled-our-summits.yml","en-us/blog/how-we-scaled-our-summits",{"_path":4036,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4037,"content":4043,"config":4048,"_id":4050,"_type":14,"title":4051,"_source":16,"_file":4052,"_stem":4053,"_extension":19},"/en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug",{"title":4038,"description":4039,"ogTitle":4038,"ogDescription":4039,"noIndex":6,"ogImage":4040,"ogUrl":4041,"ogSiteName":667,"ogType":668,"canonicalUrls":4041,"schema":4042},"How we spent two weeks hunting an NFS bug in the Linux kernel","Here's an in-depth recap of debugging a GitLab issue that culminated in a patch for the Linux kernel.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672173/Blog/Hero%20Images/nfs-bug-hunt-detective.jpg","https://about.gitlab.com/blog/how-we-spent-two-weeks-hunting-an-nfs-bug","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we spent two weeks hunting an NFS bug in the Linux kernel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2018-11-14\"\n      }",{"title":4038,"description":4039,"authors":4044,"heroImage":4040,"date":4045,"body":4046,"category":743,"tags":4047},[2433],"2018-11-14","\n\nUPDATE 2019-08-06: This bug has now been resolved in the following\ndistributions:\n\n* [Red Hat Enterprise Linux 7](https://access.redhat.com/errata/RHSA-2019:2029)\n* 
[Ubuntu](https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1802585)\n* Linux mainline: Backported to [4.14-stable](https://lkml.org/lkml/2019/8/2/562) and [4.19-stable](https://lkml.org/lkml/2019/8/2/639)\n\nOn Sep. 14, the GitLab support team escalated a critical\nproblem encountered by one of our customers: GitLab would run fine for a\nwhile, but after some time users encountered errors. When attempting to\nclone certain repositories via Git, users would see an opaque `Stale\nfile error` message. The error message persisted for a long time,\nblocking employees from being able to work, unless a system\nadministrator intervened manually by running `ls` in the directory\nitself.\n\nThus launched an investigation into the inner workings of Git and the\nNetwork File System (NFS). The investigation uncovered a bug with the\nLinux v4.0 NFS client and culminated with a [kernel patch that was written by\nTrond Myklebust](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=be189f7e7f03de35887e5a85ddcf39b91b5d7fc1)\nand [merged in the latest mainline Linux kernel](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=c7a2c49ea6c9eebbe44ff2c08b663b2905ee2c13)\non Oct. 26.\n\nThis post describes the journey of investigating the issue and\ndetails the thought process and tools by which we tracked down the\nbug. It was inspired by the fine detective work in [How I spent two\nweeks hunting a memory leak in Ruby](http://www.be9.io/2015/09/21/memory-leak/)\nby Oleg Dashevskii.\n\nMore importantly, this experience exemplifies how open source software\ndebugging has become a team sport that involves expertise across\nmultiple people, companies, and locations. 
The GitLab motto \"[everyone can\ncontribute](/company/mission/#mission)\" applies not only to GitLab itself, but also to other open\nsource projects, such as the Linux kernel.\n\n## Reproducing the bug\n\nWhile we have run NFS on GitLab.com for many years, we have stopped\nusing it to access repository data across our application\nmachines. Instead, we have [abstracted all Git calls to\nGitaly](/blog/the-road-to-gitaly-1-0/).\nStill, NFS remains a supported configuration for our customers who\nmanage their own installation of GitLab, but we had never seen the exact\nproblem described by the customer before.\n\n[Our customer gave us a few important clues](https://gitlab.com/gitlab-org/gitlab-ce/issues/51437):\n\n1. The full error message read, `fatal: Couldn't read ./packed-refs: Stale file handle`.\n2. The error seemed to start when they started a manual Git garbage\ncollection run via `git gc`.\n3. The error would go away if a system administrator ran `ls` in the\ndirectory.\n4. The error also would go away after `git gc` process ended.\n\nThe first two items seemed obviously related. When you push to a branch\nin Git, Git creates a loose reference, a fancy name for a file that\npoints your branch name to the commit. For example, a push to `master`\nwill create a file called `refs/heads/master` in the repository:\n\n```bash\n$ cat refs/heads/master\n2e33a554576d06d9e71bfd6814ee9ba3a7838963\n```\n\n`git gc` has several jobs, but one of them is to collect these loose\nreferences (refs) and bundle them up into a single file called\n`packed-refs`. This makes things a bit faster by eliminating the need to\nread lots of little files in favor of reading one large one. 
For\nexample, after running `git gc`, an example `packed-refs` might look\nlike:\n\n```\n# pack-refs with: peeled fully-peeled sorted\n564c3424d6f9175cf5f2d522e10d20d781511bf1 refs/heads/10-8-stable\nedb037cbc85225261e8ede5455be4aad771ba3bb refs/heads/11-0-stable\n94b9323033693af247128c8648023fe5b53e80f9 refs/heads/11-1-stable\n2e33a554576d06d9e71bfd6814ee9ba3a7838963 refs/heads/master\n```\n\nHow exactly is this `packed-refs` file created? To answer that, we ran\n`strace git gc` with a loose ref present. Here are the pertinent lines\nfrom that:\n\n```\n28705 open(\"/tmp/libgit2/.git/packed-refs.lock\", O_RDWR|O_CREAT|O_EXCL|O_CLOEXEC, 0666) = 3\n28705 open(\".git/packed-refs\", O_RDONLY) = 3\n28705 open(\"/tmp/libgit2/.git/packed-refs.new\", O_RDWR|O_CREAT|O_EXCL|O_CLOEXEC, 0666) = 4\n28705 rename(\"/tmp/libgit2/.git/packed-refs.new\", \"/tmp/libgit2/.git/packed-refs\") = 0\n28705 unlink(\"/tmp/libgit2/.git/packed-refs.lock\") = 0\n```\n\nThe system calls showed that `git gc` did the following:\n\n1. Open `packed-refs.lock`. This tells other processes that `packed-refs` is locked and cannot be changed.\n1. Open `packed-refs.new`.\n1. Write loose refs to `packed-refs.new`.\n1. Rename `packed-refs.new` to `packed-refs`.\n1. Remove `packed-refs.lock`.\n1. Remove loose refs.\n\nThe fourth step is the key here: the rename where Git puts `packed-refs`\ninto action. In addition to collecting loose refs, `git gc` also\nperforms a more expensive task of scanning for unused objects and\nremoving them. This task can take over an hour for large\nrepositories.\n\nThat made us wonder: for a large repository, does `git gc` keep the file\nopen while it's running this sweep? 
Looking at the `strace` logs and\nprobing the process with `lsof`, we found that it did the following:\n\n![Git Garbage Collection](https://about.gitlab.com/images/blogimages/nfs-debug/git-gc-diagram.svg)\n\nNotice that `packed-refs` is closed only at the end, after the potentially\nlong `Garbage collect objects` step takes place.\n\nThat made us wonder: how does NFS behave when one node has `packed-refs`\nopen while another renames over that file?\n\nTo experiment, we asked the customer to run the following experiment on\ntwo different machines (Alice and Bob):\n\n1. On the shared NFS volume, create two files: `test1.txt` and\n`test2.txt` with different contents to make it easy to distinguish them:\n\n    ```bash\n    alice $ echo \"1 - Old file\" > /path/to/nfs/test1.txt\n    alice $ echo \"2 - New file\" > /path/to/nfs/test2.txt\n    ```\n\n2. On machine Alice, keep a file open to `test1.txt`:\n\n    ```bash\n     alice $ irb\n     irb(main):001:0> File.open('/path/to/nfs/test1.txt')\n    ```\n\n3. On machine Alice, show the contents of `test1.txt` continuously:\n\n    ```bash\n    alice $ while true; do cat test1.txt; done\n    ```\n\n4. Then on machine Bob, run:\n\n    ```bash\n    bob $ mv -f test2.txt test1.txt\n    ```\n\nThis last step emulates what `git gc` does with `packed-refs` by\noverwriting the existing file.\n\nOn the customer's machine, the result looked something like:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\ncat: test1.txt: Stale file handle\n```\n\nBingo! We seemed to reproduce the problem in a controlled way. However,\nthe same experiment using a Linux NFS server did not have this\nproblem. The result was what you would expect: the new contents were\npicked up after the rename:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\n2 - New file  \u003C--- RENAME HAPPENED\n2 - New file\n2 - New file\n```\n\nWhy the difference in behavior? 
It turns out that the customer was using\nan [Isilon NFS\nappliance](https://www.dellemc.com/en-us/storage/isilon/index.htm) that\nonly supported NFS v4.0. By switching the mount parameters to v4.0 via\nthe `vers=4.0` parameter in `/etc/fstab`, the test revealed a different\nresult with the Linux NFS server:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\n1 - Old file \u003C--- RENAME HAPPENED\n1 - Old file\n1 - Old file\n```\n\nInstead of a `Stale file handle`, the Linux NFS v4.0 server showed stale\n*contents*. It turns out this difference in behavior can be explained by\nthe NFS spec. From [RFC\n3010](https://tools.ietf.org/html/rfc3010#page-153):\n\n> A filehandle may or may not become stale or expire on a rename.\n> However, server implementors are strongly encouraged to attempt to keep\n> file handles from becoming stale or expiring in this fashion.\n\nIn other words, NFS servers can choose how to behave if a file is\nrenamed; it's perfectly valid for any NFS server to return a `Stale file\nerror` when that happens. We surmised that even though the results were\ndifferent, the problem was likely related to the same issue. We\nsuspected some cache validation issue because running `ls` in the\ndirectory would \"clear\" the error. Now that we had a reproducible test\ncase, we asked the experts: the Linux NFS maintainers.\n\n## False path: NFS server delegations\n\nWith a clear set of reproduction steps, I [sent an email to the Linux\nNFS mailing list](https://marc.info/?l=linux-nfs&m=153721785231614&w=2)\ndescribing what we had found. Over the week, I went back and forth with\nBruce Fields, the Linux NFS server maintainer, who suggested this was a\nNFS bug and that it would be useful to look at the network traffic. He\nthought there might be an issue with NFS server delegations.\n\n### What is an NFS server delegation?\n\nIn a nutshell, NFS v4 introduced server delegations as a way to speed up file access. 
A server can\ndelegate read or write access to a client so that the client doesn't\nhave to keep asking the server whether that file has changed by another\nclient. In simpler terms, a write delegation is akin to someone lending\nyou a notebook and saying, \"Go ahead and write in here, and I'll take it\nback when I'm ready.\" Instead of having to ask to borrow the notebook\nevery time you want to write a new paragraph, you have free rein until\nthe owner reclaims the notebook. In NFS terms, this reclamation process\nis called a delegation recall.\n\nIndeed, a bug in the NFS delegation recall might explain the `Stale file\nhandle` problem. Remember that in the earlier experiment, Alice had\nan open file to `test1.txt` when it was replaced by `test2.txt` later.\nIt's possible that the server failed to recall the delegation on\n`test1.txt`, resulting in an incorrect state. To check whether this was\nan issue, we turned to `tcpdump` to capture NFS traffic and used\nWireshark to visualize it.\n\n[Wireshark](https://www.wireshark.org/) is a wonderful open source tool\nfor analyzing network traffic, and it's especially good for viewing NFS\nin action. We captured a trace using the following command on the NFS server:\n\n```\ntcpdump -s 0 -w /tmp/nfs.pcap port 2049\n```\n\nThis command captures all NFS traffic, which typically is on TCP port 2049.\nBecause our experiment worked properly with NFS v4.1 but did not\n with NFS v4.0, we could compare and contrast how NFS behaved\nin a non-working and a working case. With Wireshark, we saw the\nfollowing behavior:\n\n### NFS v4.0 (stale file case)\n\n![NFS v4.0 flow](https://about.gitlab.com/images/blogimages/nfs-debug/nfs-4.0-flow.svg)\n\nIn this diagram, we can see in step 1 Alice opens `test1.txt` and gets\nback an NFS file handle along with a `stateid` of 0x3000. 
When Bob\nattempts to rename the file, the NFS server tells Bob to retry via\nthe `NFS4ERR_DELAY` message while it recalls the delegation from Alice\nvia the `CB_RECALL` message (step 3). Alice then returns her delegation\nvia `DELEGRETURN` (step 4), and then Bob attempts to send another\n`RENAME` message (step 5). The `RENAME` completes in both cases, but\nAlice continues to read using the same file handle.\n\n### NFS v4.1 (working case)\n\n![NFS v4.1 flow](https://about.gitlab.com/images/blogimages/nfs-debug/nfs-4.1-flow.svg)\n\nThe main difference happens at the bottom at step 6. Notice in NFS v4.0\n(the stale file case), Alice attempts to reuse the same `stateid`. In\nNFS v4.1 (working case), Alice performs an additional `LOOKUP` and\n`OPEN`, which causes the server to return a different `stateid`. In v4.0,\nthese extra messages are never sent. This explains why Alice continues\nto see stale content because she uses the old file handle.\n\nWhat makes Alice decide to do the extra `LOOKUP`? The delegation recall\nseemed to work fine, but perhaps there was still an issue, such as a\nmissing invalidation step. To rule that out, we disabled NFS delegations\nby issuing this command on the NFS server itself:\n\n```sh\necho 0 > /proc/sys/fs/leases-enable\n```\n\nWe repeated the experiment, but the problem persisted. All this\nconvinced us this wasn't a NFS server issue or a problem with NFS\ndelegations; it was a problem that led us to look into the NFS client\nwithin the kernel.\n\n## Digging deeper: the Linux NFS client\n\nThe first question we had to answer for the NFS maintainers:\n\n### Was this problem still in the latest upstream kernel?\n\nThe issue occurred with both CentOS 7.2 and Ubuntu 16.04 kernels, which\nused versions 3.10.0-862.11.6 and 4.4.0-130, respectively. 
However, both\nthose kernels lagged the most recent kernel, which was 4.19-rc2 at the\ntime.\n\nWe deployed a new Ubuntu 16.04 virtual machine on Google Cloud Platform\n(GCP), cloned the latest Linux kernel, and set up a kernel development\nenvironment. After generating a `.config` file via `make menuconfig`, we\nchecked two items:\n\n1. The NFS driver was compiled as a module (`CONFIG_NFSD=m`).\n2. The [required GCP kernel settings](https://cloud.google.com/compute/docs/images/building-custom-os)\nwere set properly.\n\nJust as a geneticist would use fruit flies to study evolution in\nreal time, the first item allowed us to make quick changes in the NFS\nclient without having to reboot the kernel. The second item was required\nto ensure that the kernel would actually boot after it was\ninstalled. Fortunately, the default kernel settings had all the settings\nright out of the box.\n\nWith our custom kernel, we verified that the stale file problem still\nexisted in the latest version. That begged a number of questions:\n\n1. Where exactly was this problem happening?\n2. Why was this problem happening with NFS v4.0 but not in v4.1?\n\nTo answer these questions, we began to investigate the NFS [source\ncode](/solutions/source-code-management/). Since we didn't have a kernel debugger available, we sprinkled the\nsource code with two main types of calls:\n\n1. `pr_info()` ([what used to be `printk`](https://lwn.net/Articles/487437/)).\n2. 
`dump_stack()`: This would show the stack trace of the current function call.\n\nFor example, one of the first things we did was hook into the\n`nfs4_file_open()` function in `fs/nfs/nfs4file.c`:\n\n```c\nstatic int\nnfs4_file_open(struct inode *inode, struct file *filp)\n{\n...\n        pr_info(\"nfs4_file_open start\\n\");\n        dump_stack();\n```\n\nAdmittedly, we could have [activated the `dprintk` messages with the\nLinux dynamic\ndebug](https://www.kernel.org/doc/html/v4.15/admin-guide/dynamic-debug-howto.html)\nor used\n[`rpcdebug`](https://www.thegeekdiary.com/how-to-enable-nfs-debug-logging-using-rpcdebug/),\nbut it was nice to be able to add our own messages to verify changes\nwere being made.\n\nEvery time we made changes, we recompiled the module and reinstalled it\ninto the kernel via the commands:\n\n```sh\nmake modules\nsudo umount /mnt/nfs-test\nsudo rmmod nfsv4\nsudo rmmod nfs\nsudo insmod fs/nfs/nfs.ko\nsudo mount -a\n```\n\nWith our NFS module installed, repeating the experiments would print\nmessages that would help us understand the NFS code a bit more. For\nexample, you can see exactly what happens when an application calls `open()`:\n\n```\nSep 24 20:20:38 test-kernel kernel: [ 1145.233460] Call Trace:\nSep 24 20:20:38 test-kernel kernel: [ 1145.233462]  dump_stack+0x8e/0xd5\nSep 24 20:20:38 test-kernel kernel: [ 1145.233480]  nfs4_file_open+0x56/0x2a0 [nfsv4]\nSep 24 20:20:38 test-kernel kernel: [ 1145.233488]  ? nfs42_clone_file_range+0x1c0/0x1c0 [nfsv4]\nSep 24 20:20:38 test-kernel kernel: [ 1145.233490]  do_dentry_open+0x1f6/0x360\nSep 24 20:20:38 test-kernel kernel: [ 1145.233492]  vfs_open+0x2f/0x40\nSep 24 20:20:38 test-kernel kernel: [ 1145.233493]  path_openat+0x2e8/0x1690\nSep 24 20:20:38 test-kernel kernel: [ 1145.233496]  ? mem_cgroup_try_charge+0x8b/0x190\nSep 24 20:20:38 test-kernel kernel: [ 1145.233497]  do_filp_open+0x9b/0x110\nSep 24 20:20:38 test-kernel kernel: [ 1145.233499]  ? 
__check_object_size+0xb8/0x1b0\nSep 24 20:20:38 test-kernel kernel: [ 1145.233501]  ? __alloc_fd+0x46/0x170\nSep 24 20:20:38 test-kernel kernel: [ 1145.233503]  do_sys_open+0x1ba/0x250\nSep 24 20:20:38 test-kernel kernel: [ 1145.233505]  ? do_sys_open+0x1ba/0x250\nSep 24 20:20:38 test-kernel kernel: [ 1145.233507]  __x64_sys_openat+0x20/0x30\nSep 24 20:20:38 test-kernel kernel: [ 1145.233508]  do_syscall_64+0x65/0x130\n```\n\nWhat are the `do_dentry_open` and `vfs_open` calls above? Linux has a\n[virtual filesystem\n(VFS)](https://www.kernel.org/doc/Documentation/filesystems/vfs.txt), an\nabstraction layer which provides a common interface for all\nfilesystems. The VFS documentation explains:\n\n> The VFS implements the open(2), stat(2), chmod(2), and similar system\n> calls. The pathname argument that is passed to them is used by the VFS\n> to search through the directory entry cache (also known as the dentry\n> cache or dcache). This provides a very fast look-up mechanism to\n> translate a pathname (filename) into a specific dentry. Dentries live\n> in RAM and are never saved to disc: they exist only for performance.\n\n### This gave us a clue: what if this was a problem with the dentry cache?\n\nWe noticed a lot of dentry cache validation was done in\n`fs/nfs/dir.c`. In particular, `nfs4_lookup_revalidate()` sounded\npromising. 
As an experiment, we hacked that function to bail\nout early:\n\n\n```diff\ndiff --git a/fs/nfs/dir.c b/fs/nfs/dir.c\nindex 8bfaa658b2c1..ad479bfeb669 100644\n--- a/fs/nfs/dir.c\n+++ b/fs/nfs/dir.c\n@@ -1159,6 +1159,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)\n        trace_nfs_lookup_revalidate_enter(dir, dentry, flags);\n        error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);\n        trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);\n+       goto out_bad;\n        if (error == -ESTALE || error == -ENOENT)\n                goto out_bad;\n        if (error)\n```\n\nThat made the stale file problem in our experiment go away! Now we were onto something.\n\nTo answer, \"Why does this problem not happen in NFS v4.1?\", we added\n`pr_info()` calls to every `if` block in that function. After running our\nexperiments with NFS v4.0 and v4.1, we found this special condition being run\nin the v4.1 case:\n\n```c\n        if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1) {\n          goto no_open;\n        }\n```\n\nWhat is `NFS_CAP_ATOMIC_OPEN_V1`? We saw [this kernel\npatch](https://patchwork.kernel.org/patch/2300511/) mentioned this was\nan NFS v4.1-specific feature, and the code in `fs/nfs/nfs4proc.c`\nconfirmed that this flag was a capability present in v4.1 but not in v4.0:\n\n```c\nstatic const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {\n        .minor_version = 1,\n        .init_caps = NFS_CAP_READDIRPLUS\n                | NFS_CAP_ATOMIC_OPEN\n                | NFS_CAP_POSIX_LOCK\n                | NFS_CAP_STATEID_NFSV41\n                | NFS_CAP_ATOMIC_OPEN_V1\n```\n\nThat explained the difference in behavior: in the v4.1 case, the `goto\nno_open` would cause more validation to happen in\n`nfs_lookup_revalidate()`, but in v4.0, the `nfs4_lookup_revalidate()`\nwould return earlier. 
Now, how do we actually solve the problem?\n\n## The solution\n\nI reported the [findings to the NFS mailing\nlist](https://marc.info/?l=linux-nfs&m=153782129412452&w=2) and proposed\n[a naive patch](https://marc.info/?l=linux-nfs&m=153807208928650&w=2). A\nweek after the report, Trond Myklebust sent a [patch series to the list\nfixing this bug and found another related issue for NFS\nv4.1](https://marc.info/?l=linux-nfs&m=153816500525563&w=2).\n\nIt turns out the fix for the NFS v4.0 bug was deeper in the code base\nthan we had looked. Trond summarized it well in the\n[patch](https://marc.info/?l=linux-nfs&m=153816500525564&w=2):\n\n> We need to ensure that inode and dentry revalidation occurs correctly\n> on reopen of a file that is already open. Currently, we can end up not\n> revalidating either in the case of NFSv4.0, due to the 'cached open'\n> path.  Let's fix that by ensuring that we only do cached open for the\n> special cases of open recovery and delegation return.\n\nWe confirmed that this fix made the stale file problem go away and filed\nbug reports with\n[Ubuntu](https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1802585)\nand [RedHat](https://bugzilla.redhat.com/show_bug.cgi?id=1648482).\n\nKnowing full well that kernel changes may take a while to make it to\nstable releases, we also added a [workaround in\nGitaly](https://gitlab.com/gitlab-org/gitaly/merge_requests/924) to deal\nwith this issue. We did experiments to test that calling `stat()` on the\n`packed-refs` file appears to cause the kernel to revalidate the dentry\ncache for the renamed file. For simplicity, this is implemented in\nGitaly regardless of whether the filesystem is NFS; we only do this once\nbefore Gitaly \"opens\" a repository, and there are already other `stat()`\ncalls that check for other files.\n\n## What we learned\n\nA bug can be anywhere in your software stack, and sometimes you have to\nlook beyond your application to find it. 
Having helpful partners in the\nopen source world makes that job much easier.\n\nWe are extremely grateful to Trond Myklebust for fixing the problem, and\nBruce Fields for responding to questions and helping us understand\nNFS. Their responsiveness and professionalism truly reflects the best of\nthe open source community.\n\nPhoto by [dynamosquito](https://www.flickr.com/photos/dynamosquito) on [Flickr](https://www.flickr.com/photos/dynamosquito/4265771518)\n{: .note}\n",[267,1297,9,745],{"slug":4049,"featured":6,"template":680},"how-we-spent-two-weeks-hunting-an-nfs-bug","content:en-us:blog:how-we-spent-two-weeks-hunting-an-nfs-bug.yml","How We Spent Two Weeks Hunting An Nfs Bug","en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug.yml","en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug",{"_path":4055,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4056,"content":4062,"config":4067,"_id":4069,"_type":14,"title":4070,"_source":16,"_file":4071,"_stem":4072,"_extension":19},"/en-us/blog/how-we-turned-40-person-meeting-into-a-podcast",{"title":4057,"description":4058,"ogTitle":4057,"ogDescription":4058,"noIndex":6,"ogImage":4059,"ogUrl":4060,"ogSiteName":667,"ogType":668,"canonicalUrls":4060,"schema":4061},"How we turned a dull weekly all-hands into a podcast","We love asynchronous communication so much that we turned an uninspiring department-wide meeting into an engaging podcast – here's why and how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671055/Blog/Hero%20Images/headphones-colorful-background.jpg","https://about.gitlab.com/blog/how-we-turned-40-person-meeting-into-a-podcast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we turned a dull weekly all-hands into a podcast\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lyle Kozloff\"}],\n        \"datePublished\": \"2019-06-03\"\n      
}",{"title":4057,"description":4058,"authors":4063,"heroImage":4059,"date":4064,"body":4065,"category":808,"tags":4066},[1975],"2019-06-03","\nWe’ve all been there: A department all-hands. At GitLab, we’ve got them too. They’re important: There’s information you need to know, and there’s really only one way to handle it. While it’s true that we’re [all-remote](/company/culture/all-remote/), and everyone joins from their location of choice, they’re still:\n\n - Slow\n - Synchronous\n - Soul-sucking\n\nA few months ago, one of our Support Engineering Managers ([Lee](/company/team/#leematos)) proposed that we try and embrace our value of [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency) and [transition this agenda-driven meeting into a pure agenda](https://gitlab.com/gitlab-com/support/support-team-meta/issues/1394), and remove the need for face-to-face communication.\n\nMaking a big transition like this did leave us with some concerns:\n\n### Synchronous meetings can be a chance for people to connect\n\nAt GitLab we recognize the value of getting to know one’s teammates. Employees are encouraged to schedule [coffee chats](/company/culture/all-remote/tips/#coffee-chats) throughout their time at GitLab to get to know one another. (In fact, we think it’s so important that it’s one of the key tasks in [new employee onboarding](https://gitlab.com/gitlab-com/people-group/employment-templates/-/blob/main/.gitlab/issue_templates/onboarding.md#all-gitlabbers) ) We even have a consistent small group of people many of us meet up with (on video) four days a week [to connect on a purely personal level](/handbook/communication/#breakout-call) built into the company calendar. These calls aren’t forced, but attendance is organic and inviting, because you will start to build connections. This is especially important in an all-remote organization.\n\nTeam-level meetings can also be an important time to sync up and have time to banter and share personalities. 
However, we noticed that as the room grew these interactions became less natural. Within the structure of the meeting we tried to correct this with process: Rotating meeting chairs, asking people to post a “Friday Song,” and including a specific meeting section called “Cheerful Banter.” It didn’t work.\n\nUltimately it was a subset of voices who felt comfortable participating in these ways. Meetings beyond a certain size appear to lose their value as a chance for connection. They were less a conversation and more an address. As a result, we felt that we’d have more results concentrating on other avenues for the support team to express themselves and get to know one another.\n\n>We tried rotating meeting chairs, asking people to post a “Friday Song,” and including a specific meeting section called “Cheerful Banter.” It didn’t work.\n\n### Synchronous meetings are a scheduled touchpoint\n\nWhile all of our meetings are recorded and can be watched after the fact, there’s still something about having a cadence to the week. If there’s a meeting every Friday, I know that my brain will be getting new information on Fridays.\n\nTransitioning to a meeting where there is no actual meeting left us with the challenge of making sure people read the document regularly.\n\nTo solve this, we have two touch points during the week: On Wednesdays we have an automated Slack reminder to put things in the document. On Fridays, we have an automated cut-off message that starts a Slack thread for discussion of the week's items. This structure gives a little bit of “rails” that really help package up the meeting.\n\n### Synchronous meetings (at GitLab) can be a chance to absorb while working on something else\n\nThere’s something about having the ability to turn off your camera (or watch the video after the fact). I, personally, enjoy having the space that being an inactive participant in a conversation allows. 
I’ll often chop vegetables, fold laundry, or go for a run while listening along.\n\nIn fact, this type of passive listening while working on something else is not discouraged at GitLab, in fact it’s [actively encouraged in our handbook](/handbook/communication/#video-calls).\n\nAs we discussed the idea of changing this meeting, we thought it would work best if there was a format that would be efficient and multi-channel. As a big fan of podcasts myself, I thought that the format might work well.\n\n### Putting it together\n\nIf you’re interested in the nitty gritty details, we’ve made a [workflow about how the podcast is actually put together](/handbook/support/workflows/how-to-WIR-podcast.html) in the Handbook.\n\n![Slackbot reminder](https://about.gitlab.com/images/blogimages/slackbot-week-in-review.png){: .shadow.medium.center}\nSlackbot reminds us to add content to the document every Wednesday\n{: .note.text-center}\n\nBriefly, one or more team members will first take a look at each of the links in the \"Week in Review\" document and the surrounding narrative to build out a script. They'll next pull metrics from our dashboards surrounding our [performance indicators](https://about.gitlab.com/company/kpis/#engineering-kpis) and other numbers we're tracking, like the [number of pairing sessions](https://gitlab.com/gitlab-com/support/support-training/milestones/7). Finally, all together the final recording, mixing and exporting happens – all before 12:00pm PST when a Slackbot announces the release.\n\nAll said, in many ways the ‘new’ format mirrors the old. We still move issues forward, make announcements, thank one another, review our metrics, and tell personal stories. Managers still wax poetic about the things that managers wax poetic about. Team members (probably) still roll their eyes. The biggest difference is that we’ve compressed an hour of “chair time” for 40 people into 10-15 minutes of anything time. And the data is still shareable, and readable too. 
I call that a win/win/win.\n\nWant to hear what it actually sounds like? Check out our [Support Week in Review from May 31, 2019](https://drive.google.com/open?id=1irQgehSpD2lxxYHQoQh4gBsHnZQLLMj9).\n\nIn what ways can you more efficiently organize and disseminate information in your organization? Do you think a podcast would help? Let us know in the comments or tweet us [@gitlab](https://twitter.com/gitlab).\n\nPhoto by Matthieu A on Unsplash\n{: .note}\n",[811,677,9,832],{"slug":4068,"featured":6,"template":680},"how-we-turned-40-person-meeting-into-a-podcast","content:en-us:blog:how-we-turned-40-person-meeting-into-a-podcast.yml","How We Turned 40 Person Meeting Into A Podcast","en-us/blog/how-we-turned-40-person-meeting-into-a-podcast.yml","en-us/blog/how-we-turned-40-person-meeting-into-a-podcast",{"_path":4074,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4075,"content":4081,"config":4087,"_id":4089,"_type":14,"title":4090,"_source":16,"_file":4091,"_stem":4092,"_extension":19},"/en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives",{"title":4076,"description":4077,"ogTitle":4076,"ogDescription":4077,"noIndex":6,"ogImage":4078,"ogUrl":4079,"ogSiteName":667,"ogType":668,"canonicalUrls":4079,"schema":4080},"How we use GitLab to automate our monthly retrospectives","How one engineering team is using GitLab CI to automate asynchronous retrospectives, making collaboration across four continents a breeze.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670529/Blog/Hero%20Images/automate-retrospectives.jpg","https://about.gitlab.com/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use GitLab to automate our monthly retrospectives\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2019-03-07\",\n      
}",{"title":4076,"description":4077,"authors":4082,"heroImage":4078,"date":4084,"body":4085,"category":743,"tags":4086},[4083],"Sean McGivern","2019-03-07","\n\nAs an [Engineering\nManager] at GitLab I spend most of\nmy working day using GitLab for a variety of tasks – from using [issue boards](/stages-devops-lifecycle/issueboard/) for team assignments, [epics](https://docs.gitlab.com/ee/user/group/epics/) for tracking longer-term initiatives, and [todos](https://docs.gitlab.com/ee/user/todos.html) and notifications to manage my own workflow.\n\nWe also use GitLab in a number of unconventional ways, so I wanted to share with you one interesting use case we've been experimenting with.\n\n[Engineering Manager]: /handbook/engineering/management/\n\n## GitLab stage group retrospectives\n\nEach [stage group](/stages-devops-lifecycle/) at GitLab has its [own retrospective], which then feeds into the\n[GitLab-wide retrospective] we have for each monthly release.\n\n[own retrospective]: /handbook/engineering/management/group-retrospectives/\n[GitLab-wide retrospective]: /handbook/engineering/workflow/#retrospective\n\nThe [Plan team](/handbook/engineering/development/dev/plan/) is fairly widely\ndistributed: we have people on four continents, and only two members of the team\nare even in the same country as each other. We wanted to try [asynchronous\ncommunication] wherever possible, so we used GitLab issues for [our\nretrospectives], too.\n\nA quick note on terminology: we say [team] to refer to a manager – like me – and\ntheir reports. We say [stage group] to refer to the people who work on a\nparticular [DevOps stage], even across multiple teams. 
The Plan stage group is\neven more widely distributed.\n{: .note}\n\n[team]: /company/team/structure/#team-and-team-members\n[stage group]: /company/team/structure/#stage-groups\n[DevOps stage]: /handbook/product/categories/#devops-stages\n[asynchronous communication]: /handbook/communication#internal-communication\n[our retrospectives]: https://gitlab.com/gl-retrospectives/plan/issues?label_name[]=retrospective\n\n## Automating retrospective issue creation\n\nCreating the retrospective issue was fast, but adding links to notable\nissues that we shipped or that slipped was time consuming and\ntedious. In the spirit of [xkcd 1319], I decided to automate it, so I\ncreated the [async-retrospectives] project. This project makes\nretrospective issue creation a hands-off process:\n\n[xkcd 1319]: https://xkcd.com/1319/\n[async-retrospectives]: https://gitlab.com/gitlab-org/async-retrospectives\n\n1. It uses [scheduled pipelines] to create an issue on the 1st of each\n   month. As our [development month] runs from the 8th to the 7th, this\n   is a little early, but it allows the team to jot down any thoughts\n   they have while they are still working on the release.\n\n   ![](https://about.gitlab.com/images/blogimages/how-we-used-gitlab-to-automate-our-monthly-retrospectives/scheduled-pipelines.png){: .shadow}\n2. The issue is created using the standard [GitLab API], using a [protected\n   variable] to hold the credentials.\n3. When we create the issue, we use [quick actions] to add the correct\n   labels and due date in a convenient way. (This is also possible\n   without quick actions, but quick actions are more convenient for me\n   personally.)\n4. 
Another scheduled pipeline runs on the 9th of each month to update\n   the existing issue's description with the lists of issues (slipped,\n   shipped) I mentioned above.\n\n   We make our retrospectives public after we conclude them, so you can see this\n   in action on the [11.8 Plan retrospective]:\n\n   [![](https://about.gitlab.com/images/blogimages/how-we-used-gitlab-to-automate-our-monthly-retrospectives/11-8-plan-retrospective.png){: .shadow}][11.8 Plan retrospective]\n\n[scheduled pipelines]: https://docs.gitlab.com/ee/ci/pipelines/schedules.html\n[development month]: /handbook/engineering/workflow/#product-development-timeline\n[GitLab API]: https://docs.gitlab.com/ee/api/\n[protected variable]: https://docs.gitlab.com/ee/ci/variables/#protected-variables\n[quick actions]: https://docs.gitlab.com/ee/user/project/quick_actions.html\n[11.8 Plan retrospective]: https://gitlab.com/gl-retrospectives/plan/issues/22\n\nI only intended this for use in Plan, but a nice thing about a company where we\n[give agency] to people to solve their problems is that people like me are able\nto try out things that might not work globally, like this.\n\nAs it happened, it's also been [picked up by other teams and groups]. We\nconfigure the creation in a [YAML file], just like GitLab CI is configured, to\ntry to make it as easy as possible for other managers to contribute and set this\nup for their team.\n\n[give agency]: https://handbook.gitlab.com/handbook/values/#give-agency\n[picked up by other teams and groups]: https://gitlab.com/gitlab-org/async-retrospectives/merge_requests?state=merged\n[YAML file]: https://gitlab.com/gitlab-org/async-retrospectives/blob/master/teams.yml\n\n## Our experience running asynchronous retrospectives\n\n### What works\n\nWe've had a lot of positive experiences from these asynchronous\nretrospectives. In particular:\n\n1. No one is disadvantaged because of their time zone. 
If we had a video call\n   with our time zone spread, we'd have some people on that call in the middle of\n   their night, or missing out completely.\n2. Because they are written down from the start, and because comments in GitLab\n   are linkable, we can very easily refer to specific points in the future.\n3. Also, because they are written down, the comments can include links to\n   specific issues and merge requests to help other people get the same context.\n\n### What needs improvement\n\nAsynchronous retrospectives aren't perfect, of course. Some of the downsides\nwe've noticed are:\n\n1. Video calls are simply better for some things. In particular, the discussion\n   does not flow as smoothly in text as it can in a verbal conversation.\n\n   We also conduct our [engineering-wide retrospective] in a [public video\n   call], so we retain some opportunity for synchronous discussion.\n2. Similarly, team bonding is slower in text than in video calls.\n3. Participation can be lower if it's something you don't have to do right now,\n   but can always defer to a later date. We are continually [looking for ways to improve\n   this].\n\nOver all, we don't intend to go back to video calls for retrospectives,\nand we're really happy with the results. 
You can see all public\nretrospectives from the teams and groups at GitLab in the [GitLab\nretrospectives group on GitLab.com].\n\n[engineering-wide retrospective]: https://docs.google.com/document/d/1nEkM_7Dj4bT21GJy0Ut3By76FZqCfLBmFQNVThmW2TY/edit\n[public video call]: /2017/02/14/our-retrospective-and-kickoff-are-public/\n[looking for ways to improve this]: https://gitlab.com/gitlab-org/async-retrospectives/issues/12\n[GitLab retrospectives group on GitLab.com]: https://gitlab.com/gl-retrospectives\n\nPhoto by [Daniele Levis Pelusi](https://unsplash.com/photos/Pp9qkEV_xPk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/automation?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[109,811,9,723],{"slug":4088,"featured":6,"template":680},"how-we-used-gitlab-to-automate-our-monthly-retrospectives","content:en-us:blog:how-we-used-gitlab-to-automate-our-monthly-retrospectives.yml","How We Used Gitlab To Automate Our Monthly Retrospectives","en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives.yml","en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives",{"_path":4094,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4095,"content":4101,"config":4106,"_id":4108,"_type":14,"title":4109,"_source":16,"_file":4110,"_stem":4111,"_extension":19},"/en-us/blog/how-we-used-gitlab-values-to-build-a-security-awards-program",{"title":4096,"description":4097,"ogTitle":4096,"ogDescription":4097,"noIndex":6,"ogImage":4098,"ogUrl":4099,"ogSiteName":667,"ogType":668,"canonicalUrls":4099,"schema":4100},"How we used GitLab values to develop a successful Security Awards Program","We built a program that encourages, recognizes, and awards a shared responsibility for 
security.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681989/Blog/Hero%20Images/security-awards-blog.png","https://about.gitlab.com/blog/how-we-used-gitlab-values-to-build-a-security-awards-program","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used GitLab values to develop a successful Security Awards Program\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Philippe Lafoucrière\"}],\n        \"datePublished\": \"2021-05-14\",\n      }",{"title":4096,"description":4097,"authors":4102,"heroImage":4098,"date":4103,"body":4104,"category":720,"tags":4105},[2253],"2021-05-14","\n\nSecurity is our [top priority](/handbook/product/product-processes/#prioritization) at GitLab, and like many software companies, we believe security \"is everyone's responsibility\". The more GitLab team and community members are involved, the better. However, we're also scaling quickly, delivering new and large features often and need to stay focused and aligned with our value of [results](https://handbook.gitlab.com/handbook/values/#results).\n\nBecause of this focus and pace, blind spots can develop when it comes to security, so extra hands, minds, and eyes bring immense value. All security contributions to our documentation, product, and workflow are \"actions\" we want to recognize and programs that celebrate those who go the extra-mile, think out-of-the-box, or cautiously assess threats and risk, are a great reminder that everyone can contribute to the ongoing effort that is security. This is why we created the [Security Awards Program](/handbook/security/security-awards-program.html).\n\n## How we built a Security Awards Program using GitLab values\n\nThe program, opened in 2020, is a simple construct: Every valid submission (or action) earns the reporter points and recognition, and prizes are awarded at the end of each quarter. 
All non-Security team members and community members are eligible to win a grand prize at the end of the year, where the individual with the highest number of points is awarded the top prize.\n\n### Efficiency: Start boring\n\nAt GitLab, every new project is an opportunity to live and apply our values. My personal favorite, [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency), helped us start with a boring solution. The minimum achievement to start the program was to define a basic rule, and document it in our handbook. Nothing more. From this [initial merge request](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/59279), we kicked off program communications and experimented with the first results. The feedback loop was extremely short, and adjustments were made accordingly.\n\n### Iteration: Improve it as you go along\n\nBecause a successful program needs to scale with time, [iteration](https://handbook.gitlab.com/handbook/values/#iteration) is key to maintain momentum and quickly improve. The first security award nominations arrived soon after we added the concept of the program to our handbook. To easily keep track of them and enable quick updates, we created a simple markdown file hosted in a specific project. While everything could have stayed the same, we knew automation would help us avoid human errors and ensure the program would scale. [Work done this past quarter](https://gitlab.com/groups/gitlab-com/gl-security/-/epics/105) means the nominations are now fetched weekly, the associated data updated and validated, and everything is published automatically.\n\nAnother recent iteration in our Security Awards Program is the move to [automatically reward security merge requests](/handbook/security/security-awards-program.html#automatic-rewards) (merged) that fix a security bug. 
Our product is not exempt from bugs or security issues and we saw the number of S3s and S4s (learn more about how we apply [severity labels](/handbook/security/#severity-and-priority-labels-on-security-issues)) rising lately.  Adding automatic rewards to target and incentivize identifying these security issues is predictable, simple to employ, and helps us reduce security bugs.\n\n### Collaboration: Everyone adds value\n\nTo be successful, we knew we needed a thriving program that enabled [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) across the organization and beyond. We work with our AppSec team to identify the initiatives we want to encourage and incentivize, as well as on the overall evaluation of submitted \"actions\". The workflow here is simple: Once it is identified, an \"action\" (an issue or a merge request) is labeled with a \"nomination\" label. Every week, the nominations are imported into a single awards council issue in GitLab for asynchronous discussion. Each nomination is a [thread](https://docs.gitlab.com/ee/user/discussions/) in the council issue, and we use [award emojis](https://docs.gitlab.com/ee/user/award_emojis.html) to set the number of votes. Votes translate to points awarded to the author of the \"action\".\n\n![Screenshot of GitLab Bot message in issue](https://about.gitlab.com/images/blogimages/gitlab-bot-awards-message.png){: .shadow.medium.center}\nWe engage with nominees when they're awarded, extending the visibility of the program and providing an instant feedback loop.\n{: .note.text-center}\n\n### Diversity, inclusion and belonging: New, better ideas\n\nRemember that at GitLab, everyone can contribute. Contributions from the wider GitLab community are essential to maintaining the level of security we expect for our product. 
The broad and diverse talents of the global GitLab community and our [diversity, inclusion, and belonging](https://handbook.gitlab.com/handbook/values/#diversity-inclusion) value drive inclusivity into this program and we're proud that [community contributions](/handbook/security/security-awards-program.html#community-contributions) play a key role in this program. We also have multiple categories for submissions to encourage participation from engineers and non-engineers alike to ensure we have diversity of thought and innovation.\n\n#### 👉 We want your contributions! 🙌\nThere are multiple ways to contribute and you can see them outlined in this [contribution guide](/community/contribute/). Any [actions that contribute to the security of GitLab](/handbook/security/security-awards-program.html#eligible-actions) are considered and have the potential to be recognized in our Security Awards Program.\n\nNote: For bug bounty hunters interested in researching security vulnerabilities on our platform,  we have a [bug bounty program on HackerOne](https://hackerone.com/gitlab?type=team) where security researchers are invited to submit security bug reports directly for bounties. Those submissions are not considered under this program, but are still really important to us.\n\n### Results: Security fixes and awareness\n\nThis one is easy. The more bugs we spot and fix, the stronger our product is for our customers, the broader community, and our own teams, who use GitLab daily. Beyond this, the Security Awards Program is a great way to spread knowledge about what we're prioritizing on the Security team and the GitLab issues we use for awards council voting and discussion are a nice weekly resource to generate awareness of changes that matter!\n\n### Transparency: Increases visibility and collaboration\n\nThe final GitLab value at play here is – last but not least – [transparency](https://handbook.gitlab.com/handbook/values/#transparency). 
It's been widely acknowledged that transparency and security don't always easily mix. And, sure, we admit it's more difficult, but not impossible. Our Security Awards Program is meant to be as transparent as possible, while ensuring no confidential information is leaked through our pipelines. We also try to [dogfood as much as we can](/handbook/product/product-processes/#dogfood-everything) here, so the transparency around this program presents a great opportunity to experiment with our new [threat modeling process](/handbook/security/threat_modeling/). This careful review allows us to keep the source code open and make the whole process available in the handbook. While the \"actions\" rewarded are often confidential since they are related to vulnerabilities or security issues, the [leaderboard with the awarded people](/handbook/security/awards/leaderboard-fy22.html) is completely public.\n\nOur journey to recognize security initiatives is just getting started. Fleshed out in the spirit of our values, our Security Awards Program is showing constant progress and results, leading to security awareness, engagement, and a more secure organization and product.\n\n## Congrats and thank you to our current top 10 contributors 🎉 :\n\n| Contributor | Rank in their category | Points |\n| [@cablett](/company/team/#cablett) | 1 | 600 |\n| [@alexkalderimis](/company/team/#alexkalderimis) | 2 | 500 |\n| [@engwan](/company/team/#engwan) | 3 | 480 |\n| [@whaber](/company/team/#whaber) | 4 | 400 |\n| [@alexpooley](/company/team/#alexpooley) | 5 | 400 |\n| [@theoretick](/company/team/#theoretick) | 6| 400 |\n| [@sabrams](/company/team/#sabrams) | 7 | 300 |\n| [@tmaczukin](/company/team/#tmaczukin) | 8 | 300 |\n| [@nolith](/company/team/#nolith) | 1 | 300 |\n| [@emanuele.divizio](https://gitlab.com/emanuele.divizio) | 1 | 300 |\n\nHow do you reward and recognize security fixes in your organization? Is there something more or different we could do in our Security Awards Program? 
Tell us in the comments!\n",[720,9,745],{"slug":4107,"featured":6,"template":680},"how-we-used-gitlab-values-to-build-a-security-awards-program","content:en-us:blog:how-we-used-gitlab-values-to-build-a-security-awards-program.yml","How We Used Gitlab Values To Build A Security Awards Program","en-us/blog/how-we-used-gitlab-values-to-build-a-security-awards-program.yml","en-us/blog/how-we-used-gitlab-values-to-build-a-security-awards-program",{"_path":4113,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4114,"content":4120,"config":4124,"_id":4126,"_type":14,"title":4127,"_source":16,"_file":4128,"_stem":4129,"_extension":19},"/en-us/blog/how-we-utilize-user-stories-as-a-collaborative-design-tool",{"title":4115,"description":4116,"ogTitle":4115,"ogDescription":4116,"noIndex":6,"ogImage":4117,"ogUrl":4118,"ogSiteName":667,"ogType":668,"canonicalUrls":4118,"schema":4119},"Improving iteration and collaboration with user stories","From problem validation to implementation, here's the Release Management team workflow for building user-centered features at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681193/Blog/Hero%20Images/blog-user-stories.jpg","https://about.gitlab.com/blog/how-we-utilize-user-stories-as-a-collaborative-design-tool","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improving iteration and collaboration with user stories\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rayana Verissimo\"}],\n        \"datePublished\": \"2020-03-27\",\n      }",{"title":4115,"description":4116,"authors":4121,"heroImage":4117,"date":3306,"body":4122,"category":698,"tags":4123},[695],"\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIncorporating UX into Agile practices is a known challenge on software projects. One of the most common rants from design teams is how Agile does not embrace UX. 
\n\nGitLab's focus on [results](https://handbook.gitlab.com/handbook/values/#results) empowers us to create processes that work best for us, our organization, constraints, and opportunities. That includes applying Agile principles to efficiently approach how we design our product.\n\nOn GitLab's Release Management team, we rely on [User stories](https://en.wikipedia.org/wiki/User_story) to help Product Designers, Product Managers, and Engineers understand how the features we prioritize affect our end users. \n\nBy expressing one very specific need that a persona has in the format of a user story, our team has seen the following benefits:\n\n1. Keep the scope of feature proposals minimal, while focusing on users instead of solutions.\n1. Engage in a conversation with our engineering team and stakeholders, so they can help raise technical constraints and more easily estimate implementation effort.\n1. Provide an essential foundation for the next phases of design.\n1. Proactively identify follow-up stories to iterate on.\n\n## We base our user stories on real data\n\nTo ensure we're building GitLab features that are relevant to our target market, as a design practitioner, I partner up with [Jackie](https://gitlab.com/jmeshell) (Product Manager) and [Lorie](https://gitlab.com/loriewhitaker) (UX Researcher) to talk to our users, identify patterns, determine priorities, and translate research into actionable insights. \n\nUser stories are just another way to articulate the user insights into the features we prioritize. [Problem validation](https://about.gitlab.com/handbook/product-development-flow/#validation-track) is also key to building strong user stories that will deliver a great user experience. \n\nRemember: great user stories are informed by real user insights! 
There is no other way to achieve [user-centered design](https://en.wikipedia.org/wiki/User-centered_design).\n\n## How a user story was turned into an MVC for Deploy Freezes\n\nWhen we started talking about the value of being able to restrict deployment time frames and declare accepted windows of code releases in enterprise and regulated industries, our Product Manager turned to customer interviews to [validate our assumptions](https://gitlab.com/gitlab-org/gitlab/issues/39108). We surveyed more than 200 participants and interviewed five customers about what we call [Deploy Freezes](https://gitlab.com/gitlab-org/gitlab/issues/39108). As a result, we were able to understand the interplay of Deploy Freezes with Release Runbooks, as well as how this problem impacts the GitLab stages of Secure and Plan.\n\n### What we learned from problem validation\n\nWe learned that for teams that are not global, the ability to halt [CI/CD deployments](/topics/ci-cd/) in off hours or suspend CI/CD pipelines on weekends when there is limited team availability can be critical. These users were usually configuring Deploy Freeze policies manually or outside of the GitLab system. Interlocking these policies within CI/CD and automation is a must have to support our users. \n\nAdditionally, when looking at [dogfooding](https://handbook.gitlab.com/handbook/values/#dogfooding) Deploy Freezes, our internal customers (Production and Delivery teams at GitLab) may need to support users freezing code deploys as they relate to special events like big company announcements, live streamed content, and holidays.\n\nThe main difference between this and a pipeline implementer for a `.gitlab-ci.yml` file is that the authors of these kinds of pipelines are much less technical, and even editing yaml might be a challenge — though they will still need to understand markdown. 
The personas responsible for enforcing those policies include: [Release managers (Rachel)](https://about.gitlab.com/handbook/product/personas/#rachel-release-manager), [DevOps Engineers (Devon)](https://about.gitlab.com/handbook/product/personas/#devon-devops-engineer), [Software Developers (Sasha)](https://about.gitlab.com/handbook/product/personas/#sasha-software-developer), and [Development Team Leads (Delaney)](https://about.gitlab.com/handbook/product/personas/#delaney-development-team-lead).\n\nThrough research, we identified that our next focus areas should be setting Deploy Freezes in the UI on a project instance level (and eventually on a group level), having a dashboard to report across environments, and logging failed attempts for deployments during a freeze window.\n\n## From user insights to user stories\n\nAs I start working on the design phase of the [MVC](/handbook/product/product-principles/#the-minimal-viable-change-mvc), I'll break my tasks into the following steps:\n\n1. Work with my Product Manager to identify the main user story.\n1. Identify possible scenarios and edge cases with the entire team.\n1. Write acceptance criteria for user stories and create a low-fidelity prototype.\n1. Discuss and iterate with the team, and move the prototype to high fidelity.\n\nThe typical format of a user story is a single sentence: “_As a [type of user], I want to [goal], so that [benefit]_”. Toward the beginning of the design phase, I documented this user story:\n\n> As a DevOps Engineer/Release Manager, I want to specify windows of time when deployments are not allowed for an environment with my GitLab project, so that I can interlock it within CI/CD and automation.\n\nLooking back at the extensive research our Product Manager conducted, as a designer, I am confident that my user story focuses on the right persona and that the scope covers one specific use case (specifying Deploy Freezes on a project level). 
All other scenarios, such as dashboard, reporting, group level configuration, and auditing are too large for the MVC. This user story also talks about what we are going to build, not how we are going to do it.\n\nIt is important to highlight that for some organizations, the DevOps Engineer and Release Manager personas [merge into one](https://gitlab.com/gitlab-org/ux-research/issues/346), where parties responsible for releases also need to keep a hand in development, creating code and contributing to applications, outside of automation. This is one of the reasons why when designing for Release Management, I need to remember our users might have different levels of familiarity and expectations with developer-centered flows.\n\n## Thinking big as a team\n\nI carefully consider the development proposal and team conversations to understand the goals and constraints that can inform my initial user story. Every single member of the Release Management team is design minded, so collaborating to improve my design proposals is usually a no brainer!\n\nInstead of collecting user stories in our backlog, we groom and refine them on the fly during the planning phase. Being an all-remote company requires some adjustments to my design process, and our team tries to work as asynchronously as possible. \n\nWe tackle collaboration in many different ways: by using issue threads, during Think Big sessions, PM/UX sync calls, 1:1s, and via Slack messages. The most important thing is that every design and technical decision we make is incorporated back into a single source of truth ([SSOT](https://docs.gitlab.com/ee/development/documentation/styleguide/#documentation-is-the-single-source-of-truth-ssot)) -- in our case, the MVC issue. \n\nUpdating the scope and acceptance criteria of issues is a shared responsibility between my Product Manager, the Engineers, and me. 
We do our best to collect all relevant information, so that our customers and counterparts can have a clear understanding of what we are delivering in the next milestone.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/SU9mqOUSl1k\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nOur team (UX, FE, BE, PM) met to discuss the latest decisions made and discoveries about deploy freezes. Watch on [GitLab Unfiltered](https://youtu.be/SU9mqOUSl1k).\n{: .note.text-center}\n\n## Defining the acceptance criteria\n\nDuring our refinement process, I learned from engineering that we could specify timeboxes for the freezes using cron syntax, and that we could reuse this from the existing [Pipeline schedules](https://docs.gitlab.com/ee/ci/pipelines/schedules.html) capability. Hooray! Engineers also proposed that the `gitlab-ci.yml` file would instruct the pipeline to not run per the cron syntax. They also thought that it would be great to automatically retry the job and continue the deployment process when the freeze period was over.\n\nOn the design side, my additional proposal focused on [notifying users on the interface](https://gitlab.com/gitlab-org/gitlab/-/issues/24295#note_278661935) when a freeze period would be enabled for specific environments. This was especially important to inform users that are consuming the deployments, and not necessarily the people involved in setting up Deploy Freezes.\n\nThis is what one of the first versions of the MVC looked like:\n\n![Initial take on deploy freeze acceptance criteria](https://about.gitlab.com/images/blogimages/user-stories-as-design-tool/blog-deploy-freeze-iteration.jpg){: .large.center}\n\nHigh-level acceptance criteria including some interaction and requirements for frontend. 
No high-fidelity prototyping at this point.\n{: .note.text-center}\n\n![Initial take on deploy freeze prototype](https://about.gitlab.com/images/blogimages/user-stories-as-design-tool/blog-deploy-freeze-prototype.jpg){: .large.center}\n\nA _quick-and-dirty_ mockup produced by manipulating the source and styles of the Environments page on the browser.\n{: .note.text-center}\n\nAlthough engineering wanted to support the ability to configure everything related to a Deploy Freeze using the `gitlab-ci.yml` file at some point, while being able to retry a pipeline or even bypass a freeze period, we had significant data showing that it would be more valuable to users to instead have the ability to easily configure their policies using the UI. Our user insights told us that users configuring these kinds of pipelines are much less familiar with the terminal, and even editing yaml might be a challenge.\n\n## Iterate, iterate, iterate\n\nBack to the drawing board (or, in my case, the GitLab issue). My focus shifted from simply notifying users to allowing them to enter data in the UI. We worked asynchronously on [different proposals](https://gitlab.com/gitlab-org/gitlab/-/issues/24295#note_298796163), until the point the scope could fit an MVC that satisfies the user goals we identified through research.\n\nUsing the same user story, the acceptance criteria shifted to:\n\n![Deploy freeze mvc user story](https://about.gitlab.com/images/blogimages/user-stories-as-design-tool/blog-deploy-freeze-user-story-iteration.jpg){: .large.center}\n\nWhile working on the new acceptance criteria, I started raising a couple of questions and [thinking of edge cases](https://gitlab.com/gitlab-org/gitlab/-/issues/24295#note_308692974). 
For example, \"can we use a date picker UI component to select the deploy freeze period?\", \"will the end freeze field always be mandatory when setting a new period?\", \"can we support the user's default browser timezone when showing a dropdown on the UI?\", and \"how do we validate the cron syntax on the frontend?\". \n\nOnce again, Engineers to the rescue! [Nathan](https://gitlab.com/nfriend), our Super-Frontend Engineer, and I had a quick call where I walked him through my low-fidelity prototypes and we aligned our goals. Our decisions were documented as a [comment on the MVC](https://gitlab.com/gitlab-org/gitlab/-/issues/24295#note_308765612) to ensure everyone involved could access the information.\n\n## Break the user story down into something even smaller\n\nMy conversation with Nathan also made it clear that editing and deleting a deployment freeze using the UI, as well as showing human-readable cron syntax descriptions, would increase the MVC scope. Because of that, I proactively broke the user story down into four new user stories that were logged as new feature proposals:\n\n> As a user, I want to see human-readable cron syntax descriptions for deploy freezes in GitLab's UI, so that I can easily understand the information about a freeze. [gitlab#212458](https://gitlab.com/gitlab-org/gitlab/-/issues/212458)\n\n> As a DevOps Engineer/Release Manager, I want to edit Deploy Freezes I specify, so that I can keep my information up to date. [gitlab#212449](https://gitlab.com/gitlab-org/gitlab/-/issues/212449)\n\n> As a DevOps Engineer/Release Manager, I want to delete deployment freezes I specified, so that I can keep my information up to date. [gitlab#212451](https://gitlab.com/gitlab-org/gitlab/-/issues/212451)\n\n> As a user, I want to be informed when a Deploy Freeze is active for my project in GitLab, so that I can stay up to date with the status of production deployments. 
[gitlab#212460](https://gitlab.com/gitlab-org/gitlab/-/issues/212460)\n\nBy reducing the scope of the MVC, the Product Manager and Engineers could start a new conversation about delivery efforts:\n\n![Async discussion and frontend estimation of the MVC](https://about.gitlab.com/images/blogimages/user-stories-as-design-tool/blog-deploy-freeze-breakdown.jpg){: .large.center}\n\nAsync discussion and frontend estimation of the MVC. Read more on [gitlab#24295](https://gitlab.com/gitlab-org/gitlab/-/issues/24295#note_311365886)\n{: .note.text-center}\n\nI then placed the descoped stories in the “backlog” for short-term assignment and long-term reference. By having our Product Manager serve as gatekeeper to the backlog, our team can focus on working on high-value features that have already been vetted and are supported by user insights.\n\n## Prototyping with a focus\n\nWith everyone on board, I can finally spend proper time prototyping the MVC solution! 🎉\n\nI personally am a fan of spending more time writing down design specifications than pushing pixels. Because [the GitLab train is always moving](https://about.gitlab.com/releases), prototyping is costly and prone to becoming obsolete in the blink of an eye. I also try to be mindful when I need to provide a prototype to my team. Will it help them understand my proposal? Can the prototype unlock hidden edge cases I didn't account for? Do I work with people that need visual cues to better understand the design goals?\n\n![Final deploy freeze prototypes](https://about.gitlab.com/images/blogimages/user-stories-as-design-tool/blog-deploy-freeze-prototype-iteration.jpg){: .large.center}\n\nFinal high-fidelity prototypes used by the engineering team to estimate the MVC. 
Adjustments of UI copy are aligned on the fly with Technical writers after this phase.\n{: .note.text-center}\n\nPreviously, [Pedro](https://gitlab.com/pedroms) shared how one of the designer’s responsibilities is [handing off the design to developers](https://about.gitlab.com/blog/how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster/), so that it gets implemented as intended. I trust that my frontend team will follow the acceptance criteria, for example, by reusing the Pajamas components I specified. And if by any chance they need to make changes/improvements to the design proposal on the fly: so be it!\n\nThe prototypes I build facilitate the design/development conversation, and they are meant to be used as assets to help our engineers have a starting point to build features. Prototypes are not the end product! Because I am also added as a UX reviewer to frontend merge requests, I can spot inconsistencies under development and discuss the proposed changes on the fly with the team. Once we agree on a direction, and if the change is big enough to be noted on the scope of our MVC, I make sure the information is updated in the SSOT.\n\n## Five key takeaways from our workflow\n\n1. **You don't need to do Agile to be agile (lower case _a_).** Work around implementing best practices that work for you and your team.\n1. **Communicate with your team early and often.** As a tool, user stories help facilitate the conversation between UX, Research, Engineering, and Product. Look at the user stories to estimate design and development effort.\n1. **Identify user stories before jumping into designing a \"solution.\"** Make an effort to use research insights to guide your decisions. Deliver on real user needs. If user data is not available, try looking into different [research methods](https://about.gitlab.com/handbook/product/ux/ux-research/#research-methods). \n1. 
**Play around with the acceptance criteria.** For each user story, see if it can be broken down into smaller, more specific stories.\n1. **Document, iterate, and validate your decisions.** \n\n## Where do we go from here?\n\nAs with everything we do, this process is in constant change. User stories have been a great ally to promote a shared vision of the end user, while shifting the way we ship solutions. We went from having a list of functionalities with dubious origins that simply focused on solutions to having user-centered proposals that clearly let us communicate _why_ we are building things and _how_ we want to help our users achieve their goals.\n\nI am beyond excited about the relationship the Release Management team built around design and research. We are confident with the solution proposed for Deploy Freezes, but further developments may require [solution validation](https://about.gitlab.com/handbook/product-development-flow/#validation-phase-4-solution-validation) to test the usability of our prototypes and implemented features. Personally, I would still like to uncover more opportunities to contribute to [gitlab-ui](https://gitlab.com/gitlab-org/gitlab-ui) components and the [Pajamas Design System](https://design.gitlab.com/) through our user stories, so that we can come up with additional improvements to patterns that are used globally across GitLab.\n\nIf any of these topics interest you or if you have some feedback on our ideas, please get in touch and let us know what you think. We are planning great things for Release Management, in particular [Release Orchestration](https://about.gitlab.com/direction/release/release_orchestration/) with GitLab. \n\nYou can get to know more about the [Release UX Team Strategy](https://about.gitlab.com/handbook/product/ux/stage-group-ux-strategy/release/) in our handbook! 
We would love to hear from you!\n\nCover image by [Christina @ wocintechchat.com](https://unsplash.com/@wocintechchat?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[700,1698,811,9,1698],{"slug":4125,"featured":6,"template":680},"how-we-utilize-user-stories-as-a-collaborative-design-tool","content:en-us:blog:how-we-utilize-user-stories-as-a-collaborative-design-tool.yml","How We Utilize User Stories As A Collaborative Design Tool","en-us/blog/how-we-utilize-user-stories-as-a-collaborative-design-tool.yml","en-us/blog/how-we-utilize-user-stories-as-a-collaborative-design-tool",{"_path":4131,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4132,"content":4138,"config":4144,"_id":4146,"_type":14,"title":4147,"_source":16,"_file":4148,"_stem":4149,"_extension":19},"/en-us/blog/how-were-improving-self-managed-billing",{"title":4133,"description":4134,"ogTitle":4133,"ogDescription":4134,"noIndex":6,"ogImage":4135,"ogUrl":4136,"ogSiteName":667,"ogType":668,"canonicalUrls":4136,"schema":4137},"How we’re improving self-managed billing","GitLab is introducing Seat Link in our 12.9 release to make renewals easier for our self-managed customers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679112/Blog/Hero%20Images/golden-gate.jpg","https://about.gitlab.com/blog/how-were-improving-self-managed-billing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we’re improving self-managed billing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Karampalas\"}],\n        \"datePublished\": \"2020-03-16\",\n      }",{"title":4133,"description":4134,"authors":4139,"heroImage":4135,"date":4141,"body":4142,"category":675,"tags":4143},[4140],"Michael Karampalas","2020-03-16","\n> **2020-04-01 UPDATE:** Based on feedback from the community, we have provided a way to [disable Seat 
Link](https://docs.gitlab.com/ee/subscriptions/#disable-seat-link). Thank you all for your feedback and for helping to make GitLab a better product for everyone. \n\nGitLab is excited to announce that we'll be introducing [Seat Link](https://docs.gitlab.com/ee/subscriptions/#seat-link) in 12.9 to help our self-managed customers add more users to their GitLab instance as their companies grow along with their user base. Historically, our renewal process has been overly complicated and confusing for one reason: True-ups.\n\n## The trouble with true-ups\n\nWhat’s a true-up? A true-up is a one-time charge at the time of renewal that accounts for the users added to an instance above the original subscription amount. As an example, if a company's subscription was originally for 100 users but the company grew to 125 over the course of the year, at renewal the company would owe for 12-months of usage for the extra 25, regardless of when in the year the users were added.\n\nTrue-ups were required because GitLab could not collect current information about self-managed user counts and growth. Our team could not determine if a customer had added users; when those users were added, and whether the customer is within their license. As a result, with each renewal customers had to pay for the full 12 months of users added since their previous renewal. We realize this is not ideal.\n\nWe had no way to prorate these charges, and the process relied on customers providing user counts, which is additional, manual work and has led to errors and confusion in the past.\n\n## Seat Link is our solution\n\nThat’s why we are excited to introduce [Seat Link](https://docs.gitlab.com/ee/subscriptions/#seat-link) with our upcoming 12.9 release.\n\nSeat Link is our first step toward providing self-managed customers with more transparent, prorated charges for user growth throughout the year. 
By using Seat Link, GitLab can automatically charge a prorated amount each quarter for users added to a self-managed instance.\n\nSeat Link means our customers no longer have to pull out the calculator for confusing math when their subscription renews, and there will be no more surprises, and most importantly, no more true-ups!\n\n### How Seat Link works\n\nEach day, Seat Link sends GitLab a count of all the users in connected, self-managed instances. The daily count means we have the information necessary to automate prorated reconciliations. By automating the user count process, we shift the burden of accounting for all users from our self-managed customers to GitLab. These data will be sent securely through an encrypted HTTPS connection:\n\n* Date\n* Historical maximum user count\n* Active user count\n* License key\n\nSee an [example of the POST request](https://docs.gitlab.com/ee/subscriptions/#seat-link) in our docs.\n\nSeat Link will be minimal and non-configurable, making it simpler for as many customers as possible to use. However, air-gapped or closed network customers will not be able to use Seat Link at this time, and we will continue using the existing true-up model.\n\nSeat Link will be available in GitLab 12.9, but we will not start processing prorated charges until a future date, with a tentative target of 12.10.\n\nQuestions? Concerns? 
Please join the discussion and [contribute to our epic](https://gitlab.com/groups/gitlab-org/-/epics/2747).\n\nCover image by [Modestas Urbonas](https://unsplash.com/@modestasu) on [Unsplash](https://unsplash.com/photos/vj_9l20fzj0)\n{: .note}\n",[9],{"slug":4145,"featured":6,"template":680},"how-were-improving-self-managed-billing","content:en-us:blog:how-were-improving-self-managed-billing.yml","How Were Improving Self Managed Billing","en-us/blog/how-were-improving-self-managed-billing.yml","en-us/blog/how-were-improving-self-managed-billing",{"_path":4151,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4152,"content":4158,"config":4163,"_id":4165,"_type":14,"title":4166,"_source":16,"_file":4167,"_stem":4168,"_extension":19},"/en-us/blog/how-you-can-help-shape-the-future-of-securing-applications-at-gitlab",{"title":4153,"description":4154,"ogTitle":4153,"ogDescription":4154,"noIndex":6,"ogImage":4155,"ogUrl":4156,"ogSiteName":667,"ogType":668,"canonicalUrls":4156,"schema":4157},"How you can help shape the future of securing applications with GitLab","We want to provide the best experience in keeping your application safe after your code is in production.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668495/Blog/Hero%20Images/how-you-can-help-shape-the-future-of-securing-applications-at-gitlab.jpg","https://about.gitlab.com/blog/how-you-can-help-shape-the-future-of-securing-applications-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How you can help shape the future of securing applications with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2019-11-14\",\n      }",{"title":4153,"description":4154,"authors":4159,"heroImage":4155,"date":4160,"body":4161,"category":299,"tags":4162},[784],"2019-11-14","This blog post was originally published on the GitLab Unfiltered blog. 
It was reviewed and republished on 2019-12-09.\n\nAs part of our vision to deliver the entire DevOps lifecycle in a single application, we’re designing an experience that will allow security professionals to collaborate directly with developers. We need your help to make it the best it can be!\n\nOur newest product stage is Protect, and it’s an exciting time as we continue to define our [strategy and roadmap](/direction/govern/). The Protect UX team’s goal is to provide the best experience in keeping your application safe after your code is in production. This includes all features that help you defend your applications and cloud infrastructure by giving you the ability to identify, catalogue, manage, and remediate threats, vulnerabilities, and risks.\n\nSome of the new categories we’re planning for in 2020 include Runtime Application Self Protection, Threat Detection, User Entity and Behavioral Analytics and [more](/handbook/product/categories/#protect-section).\n\nWe have a ton of UX research planned to help us learn more about this new category, and we hope you consider adding your voice.\n\n### Our users' jobs to be done\n\nFrom what we know so far, the Protect user is responsible for maintaining the security of their company’s environments and applications. They seem to have a wide variety of job titles, including security analyst and SecOps engineer.\n\nWe aim to understand our different users’ motivations and goals by identifying their primary [jobs to be done](https://hbr.org/2016/09/know-your-customers-jobs-to-be-done). For the Protect user, these include things like:\n\n> When I make sure my company’s applications aren’t vulnerable to bad actors, I want to monitor the traffic coming to my application and detect the possibility of an attack (SQL injection attempts, XSS attempts, vulnerability scanners, etc.) 
so I can know what parts of the application I need to protect better.\n\n### Our recruiting challenge\n\nPerhaps because we’re best known for our origins in source code management, we usually have an abundance of participants who fit our software developer persona when we’re recruiting for studies. Newer personas like our Protect users have been more elusive by comparison — we’ve attempted studies where we couldn’t find a single human to speak with.\n\nThis is a real problem for us, as we believe strongly in evidence-based design. We want to build for your actual wants and needs as opposed to our assumptions about them.\n\n### How you can help\n\nIf any of this sounds like you, please sign up to our research program, [GitLab First Look](/community/gitlab-first-look/)! When you join, you can indicate exactly which product areas and types of research you’re interested in. We’ll send you invitations to participate when you match with studies.\n\nQuestions? Reach out to me on [twitter](https://twitter.com/EmvonHoffmann).\n\n[Sam Kerr](/company/team/#stkerr) and [Tali Lavi](/company/team/#tlavi) contributed to this post.\n\nCover image by [Rashid Khreiss](https://unsplash.com/@rush_intime) on [Unsplash](https://unsplash.com).\n",[700,9],{"slug":4164,"featured":6,"template":680},"how-you-can-help-shape-the-future-of-securing-applications-at-gitlab","content:en-us:blog:how-you-can-help-shape-the-future-of-securing-applications-at-gitlab.yml","How You Can Help Shape The Future Of Securing Applications At 
Gitlab","en-us/blog/how-you-can-help-shape-the-future-of-securing-applications-at-gitlab.yml","en-us/blog/how-you-can-help-shape-the-future-of-securing-applications-at-gitlab",{"_path":4170,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4171,"content":4177,"config":4183,"_id":4185,"_type":14,"title":4186,"_source":16,"_file":4187,"_stem":4188,"_extension":19},"/en-us/blog/illustrations-and-icons-on-gitlab-com",{"title":4172,"description":4173,"ogTitle":4172,"ogDescription":4173,"noIndex":6,"ogImage":4174,"ogUrl":4175,"ogSiteName":667,"ogType":668,"canonicalUrls":4175,"schema":4176},"Inside GitLab: Illustrations and icons on GitLab.com","Learn how our UX team creates icons and illustrations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666717/Blog/Hero%20Images/cover-image.jpg","https://about.gitlab.com/blog/illustrations-and-icons-on-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside GitLab: Illustrations and icons on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hazel Yang\"}],\n        \"datePublished\": \"2017-12-04\",\n      }",{"title":4172,"description":4173,"authors":4178,"heroImage":4174,"date":4180,"body":4181,"category":743,"tags":4182},[4179],"Hazel Yang","2017-12-04","\nIn our 10.0 release, we introduced a [new navigation](/blog/unveiling-gitlabs-new-navigation/) complete with a redesigned color palette and icon set. We replaced [Font Awesome](http://fontawesome.io/icons/) with our own, SVG based, icon system, and we’ve also been hard at work on a series of illustrations to provide consistent visual language and improve our onboarding experience. Read on to find out more about how the UX team goes about creating new icons and illustrations.\n\n\u003C!-- more -->\n\nIllustrations and icons are powerful communication tools. 
They tell a story where words fail and can facilitate understanding across both language and culture barriers. Replacing text with illustrations and icons can make things clear at a glance. They also open up space and allow the eye to navigate more easily across the interface.\n\n## Illustrations\n\nA common mistake when designing a product is to assume that your users will understand how to use it. In reality, most users need a little help understanding where to start on their journey in order to discover all it has to offer. This is especially true for a product like GitLab, which is brimming with features. To assist users and [improve the onboarding experience](https://gitlab.com/gitlab-org/gitlab-ce/issues/15632), we decided to implement illustrations.\n\n### Defining the style\n\nTo begin, we reviewed our product’s existing styles to ensure that the illustrations we created would support a consistent brand experience for the application and our [official site](/).  During this review, we found that the visual design of these two products had diverged. The colors on our official website were vivid and energetic, orange and purple, while the colors of GitLab.com were soft and gentle, grey and white. Blending these two opposing styles into one set of illustrations was not going to be an easy task.\n\n{: .text-center}\n![gitlab-websites](https://about.gitlab.com/images/blogimages/illustrations-and-icons/gitlab-websites.png)\n\n### Visual consistency\n\nTo provide visual consistency across both products, we decided to pick up the primary, orange, and secondary, purple, colors from the official site for use in our illustrations. However, these two colors had a similar chroma and, used without modification, would create a jarring effect. Also, they just didn’t work well with the style of GitLab.com at the time. Our solution for this was to adjust the chroma of the two colors to generate new ones. 
These new colors played more harmoniously with the existing style of GitLab.com and allowed us to play with color in more creative ways.\n\n{: .text-center}\n![color-palettes](https://about.gitlab.com/images/blogimages/illustrations-and-icons/color-palettes.png)\n\n### Following GitLab values\n\n[Values](https://handbook.gitlab.com/handbook/values/) are important to us at GitLab. It was essential that our illustrations reflected these values and enhanced the brand experience to create a personal connection with our users. At GitLab we encourage people to maintain a positive attitude. Our illustrations needed to bring out a sense of playfulness, delight, and overall positivity.\n\n{: .text-center}\n![positive-illustration](https://about.gitlab.com/images/blogimages/illustrations-and-icons/positive-illustration.png){: .shadow}\n\nWe quickly found that these illustrations provided value as well as functionality. Used in an empty state, they inform users of features they may not know about and provide valuable onboarding. Used in error messaging, they quickly redirect users and get them back on track.\n\n{: .text-center}\n![errors-illustration](https://about.gitlab.com/images/blogimages/illustrations-and-icons/404.png){: .shadow}\n\nDiversity and inclusivity are essential to who we are as well. We have users, employees, and community members from many different cultural and geographical backgrounds. We reflected this variety of races, nationalities, and genders in the development of the illustrations for our [UX personas](https://design.gitlab.com/). We chose to use illustrations rather than stock photos. 
Illustrations make it easy to cover a variety of personas with no need to worry about copyrights.\n\n{: .text-center}\n![person-avatars](https://about.gitlab.com/images/blogimages/illustrations-and-icons/person-avatars.png){: .shadow}\n\nYou can find out more about our illustrations in the [handbook](https://docs.gitlab.com/ee/development/ux/).\n\n## Icons\n\nWhen GitLab was first in development, we chose Font Awesome as the primary icon set. It contained a variety of commonly used icons and was easy to implement. For an early-stage startup, it was a very useful tool.  \n\nAs GitLab matured, we needed more and more custom icons. These custom icons were created by our designers and, when mixed in with Font Awesome, led to an inconsistent visual style. Adding to the problem was the fact that we didn’t have a guide for icon usage. The lack of guidance caused [inconsistent](https://gitlab.com/gitlab-org/gitlab-ce/issues/29584) and [duplicated](https://gitlab.com/gitlab-org/gitlab-ce/issues/19751) icon usage to occur frequently. It confused users and had a detrimental effect on usability.\n\n### Creating our icons\n\nIt was time to build a consistent visual style and eliminate the confusion by [creating a complete custom icon set](https://gitlab.com/gitlab-org/gitlab-ce/issues/32894). Using distinct and unique iconography offered a powerful way to emphasize our unique personality.\n\n{: .text-center}\n![new-icon-set](https://about.gitlab.com/images/blogimages/illustrations-and-icons/new-icon-set.png){: .shadow}\n\nOnce again, consistency was key here. We gave our icons a thick border and rounded corners. Creating a consistent style between our illustrations and icons strengthened our brand identity by making it memorable and more easily recognizable.\n\nThick borders also help with accessibility. We were aware that some of our users adjusted their screen to higher resolutions, making an icon with a thin border harder to recognize. 
For this reason, we went with a `2x` width border.\n\n## The outcome\n\n### More recognizable and consistent visual language\n\nOur new color palette and icons on GitLab.com created a robust and consistent brand experience, making GitLab identifiable at a glance.\n\n### Illustrations for empty states and persona avatars\n\nMany of our empty state illustrations have been implemented, and we continue to develop more. You can see our avatar illustrations on [UX personas](https://design.gitlab.com/).\n\n{: .text-center}\n![example-empty-state](https://about.gitlab.com/images/blogimages/illustrations-and-icons/example-empty-state-issues.png){: .shadow}\n\n### Icons in contextual navigation and system notes\n\nWe have implemented most of our new icons on GitLab.com. You can find them in the [system notes](https://gitlab.com/gitlab-org/gitlab-ce/issues/24784) and [contextual navigation](https://gitlab.com/gitlab-org/gitlab-ce/issues/34027). Font Awesome will soon be completely phased out. We'd like to thank the Font Awesome team, their open source icon set allowed us to get very far, very fast!\n\n{: .text-center}\n![example-system-notes](https://about.gitlab.com/images/blogimages/illustrations-and-icons/system-notes.png){: .shadow}\n\n{: .text-center}\n![example-contextual-nav](https://about.gitlab.com/images/blogimages/illustrations-and-icons/contextual-nav-02.png){: .shadow}\n\n### Streamline process with the use of SVGs\n\nAll of our illustrations and icons are now exported as SVG files. Our Frontend AC Lead [Tim Zallmann](/company/team/#tpmtim) created [GitLab SVGs](http://gitlab-org.gitlab.io/gitlab-svgs/), a repository to manage all SVG Assets for GitLab. It creates SVG Sprites out of Icons and optimises SVG-based Illustrations. These are then exported to a live preview site. 
This enables the design team to add new icons and the frontend team to find icons quickly and easily.\n\n{: .text-center}\n![screenshot-gitlab-svgs](https://about.gitlab.com/images/blogimages/illustrations-and-icons/gitlab-svgs.png){: .shadow}\n\n## Conclusion\n\nYou will see GitLab's brand experience and UX design become more consistent and distinctive, and GitLab SVGs will soon be integrated into our [Design Library](https://gitlab.com/gitlab-org/gitlab-design/issues/26) we are working on. Stay tuned!\n",[1698,700,9],{"slug":4184,"featured":6,"template":680},"illustrations-and-icons-on-gitlab-com","content:en-us:blog:illustrations-and-icons-on-gitlab-com.yml","Illustrations And Icons On Gitlab Com","en-us/blog/illustrations-and-icons-on-gitlab-com.yml","en-us/blog/illustrations-and-icons-on-gitlab-com",{"_path":4190,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4191,"content":4197,"config":4202,"_id":4204,"_type":14,"title":4205,"_source":16,"_file":4206,"_stem":4207,"_extension":19},"/en-us/blog/impostorsyndrome-women-in-tech",{"title":4192,"description":4193,"ogTitle":4192,"ogDescription":4193,"noIndex":6,"ogImage":4194,"ogUrl":4195,"ogSiteName":667,"ogType":668,"canonicalUrls":4195,"schema":4196},"3 Tips for women in tech (and allies) to challenge impostor syndrome","Women at GitLab share valuable insights about mentorship, microinclusions, and remembering your hard work, to counter impostor syndrome.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681541/Blog/Hero%20Images/done_perfect.jpg","https://about.gitlab.com/blog/impostorsyndrome-women-in-tech","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Tips for women in tech (and allies) to challenge impostor syndrome\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-10-16\",\n      
}",{"title":4192,"description":4193,"authors":4198,"heroImage":4194,"date":4199,"body":4200,"category":808,"tags":4201},[672],"2020-10-16","\n\nIn an environment where women-identifying individuals make up about [34.4% of the total workforce](https://builtin.com/women-tech/women-in-tech-workplace-statistics), tech is not necessarily the easiest environment for women to break into. The women, particularly those from underrepresented groups, who do join the tech industry ought to feel a particular sense of pride and ownership at their accomplishments – what they achieved is not easy. Unfortunately for many of us, success comes with the added baggage of impostor syndrome – that debilitating feeling that you're undeserving of your accomplishments, despite objective evidence that says otherwise.\n\n## A brief history of impostor syndrome\n\nWhile men can be impacted by impostor syndrome too, it is a feeling that occurs much more frequently among women, and other underrepresented groups. The concept of \"impostor syndrome\" was first introduced in a [1978 academic study](http://mpowir.org/wp-content/uploads/2010/02/Download-IP-in-High-Achieving-Women.pdf) by two psychology researchers who studied a group of high-achieving women in academia who described feeling undeserving of their success, in danger of being outed as secretly incompetent, or a feeling like a fraud. Notably, the majority of the women that took part in the study were striving in the predominately white and male atmosphere of academia in the late 1970s – which has some similarities to the [tech industry of today](/blog/what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry/).\n\n## Strategies for curbing your impostor syndrome\n\nImpostor syndrome is more complex than needing a confidence boost. 
In a [previous blog post looking at impostor syndrome in the context of remote work](/blog/imposter-syndrome-and-remote-work/), we showed some of the common ways impostor syndrome shows up, and explained how working remotely during the pandemic might be magnifying these anxieties.\n\nWe'll unpack some insights and strategies that can be used to overcome impostor syndrome that the women of GitLab shared during two fireside chats at [GitLab Virtual Contribute](/events/gitlab-contribute/), our annual community event. The fireside chats were hosted by the Women Team Member Resource Group (TMRG) and created an opportunity for women working in different capacities at our company to share their insights and experiences, from which other women and allies can learn.\n\n### 1. Identify a mentor\n\nIt's not easy to feel comfortable and confident in a workplace were you are dramatically outnumbered. And it's no wonder that impostor syndrome flourishes among women (and other diverse populations) trying to make their mark in mostly male workplaces.\n\n\"I think one of the biggest challenges is since tech is such a male-dominated field, there are not enough women in leadership positions,\" said [Orit Golowinski](/company/team/#ogolowinski), senior product manager of Release: Progressive Delivery at GitLab, during the fireside chat. \"I think that that's one of the challenges, that there's not enough role models for women going into tech to look and aspire to.\"\n\nIt can be daunting to be one of a few female team leads at your company, or to have aspirations of breaking through that glass ceiling. Regardless of whether you're in a senior or more junior level role, identifying a mentor early on in your career is helpful. In fact, [Sara Davila](/company/team/#saraedavila), Partner and Channel Marketing at GitLab, said she first joined tech after her mentor encouraged her to try something new. 
Sara's mentor gave her the confidence to leave her role in the oil and gas industry during an economic downturn and move into the tech world. \"I think mentorship for me is really important when it comes to career development because sometimes we're critical on ourselves and we don't know that we can do things that other people see in us,\" said Sara.\n\nGreat mentors like Sara had aren't necessarily easy to find, which is why GitLab launched its pilot [Women in Sales Mentorship program,](/handbook/people-group/women-in-sales-mentorship-pilot-program/) which paired women at GitLab working in the traditionally male-dominated field of tech sales with senior leaders in our company.\n\nAbout 33% of people managers in GitLab are women, but for right now, we don't have any women in executive or senior leadership roles. GitLab continues to be a male-dominated company, with roughly 31% of total GitLab team members self-identified as women, falling short of our goal of a company that is 40% women. [Our journey to a more diverse and inclusive workplace](/blog/our-journey-to-a-diverse-and-inclusive-workplace/) is a work in progress.\n\n### 2. Create opportunities for microinclusion\n\nHave you ever looked around the room at a sea of accomplished people, and felt like an impostor? That the hard work and accomplishments that got you to this place, in this moment are somehow less than the hard work and accomplishments of those around you?\n\nIn the [LeanIn 2020 Women in the Workplace report](https://leanin.org/women-in-the-workplace-2019?gclid=EAIaIQobChMIvLvxmNb-5wIVTNbACh0w4wQBEAAYAiAAEgI_ofD_BwE), 73% of women report experiencing microaggressions at work. 
Microaggressions – the interruptions, comments, and body language that hit like tiny paper cuts – only compound feelings of impostor syndrome for women and other marginalized groups.\n\n\"Something that I've particularly faced challenge-wise is just there are times when it feels like the other people don't believe you should be in the room with them and they don't value your contributions or what you have to say,\" said [Chloe Whitestone](/company/team/#chloe), technical account manager at GitLab.\n\nWhether you're experiencing the creep of impostor syndrome yourself, or the person next to you is feeling it, defaulting to microinclusions can be a strategy for putting yourself and the people around you at ease. \"I try to think about microinclusions as opposed to microaggressions,\" said [Michelle Hodges](/company/team/#mwhodges), vice president of Worldwide Channels, at GitLab. \"Work on the small little bits of things you can do to be more inclusive as opposed to always checking yourself. And I wrote something down a couple months ago about [microinclusion](https://interactioninstitute.org/micro-inclusion-a-small-step-to-include-someone/). It's from the [Interaction Institute for Social Change](https://interactioninstitute.org/) and it says, 'A symbolic action that forces us to recall the humanity of others.'\"\n\nYou never know when small gestures of kindness from you can make a sizable impact for others. [Robin Schulman](/company/team/#rschulman), chief legal officer and corporate secretary, at GitLab, recalls an interaction she had with a newly minted Vice President at a Board of Directors meeting at her previous company. Robin struck up conversation with the young woman during the meeting, which was attended mostly by older men, about her life and her work. 
Later, when Robin left the company for a new opportunity, she received an earnest email from the VP explaining how Robin made her feel comfortable in a setting where she felt uncomfortable.\n\n\"I was so sad for all the missed opportunities that I had to do that for someone else and sad for the opportunities that maybe I could have had if somebody had done that for me,\" said Robin.\n\nPerforming microinclusions has the effect of creating a more supportive professional environment. If you think that your colleague is experience a bout of impostor syndrome, ask them about it. We break silence and stigma by having honest conversations about impostor syndrome. Then, go a step further and act more deliberately on behalf of your friends and colleagues. Repeat and attribute their great ideas in meetings, talk up their skillsets to their manager, and nominate them for promotions, bonuses, and any form of public recognition. Defaulting to microinclusions and even acting as a hypeperson for your colleagues will help squash any competitive posturing at work and instead introduce a more supportive and compassionate atmosophere.\n\n### 3. Remember how hard you worked\n\nWomen, particularly those from underrepresented groups, have to work harder and more persistently for recognition in the workplace. These demands make it easy for women to fall into the trap of feeling like you need to be an expert in everything in order to get ahead.\n\nA [frequently quoted statistic](https://hbr.org/2014/08/why-women-dont-apply-for-jobs-unless-theyre-100-qualified) shows that women apply for jobs only when they meet 100% of the qualifications, while men apply when they meet 60% of the criteria. More [recent research out of LinkedIn](https://business.linkedin.com/content/dam/me/business/en-us/talent-solutions-lodestone/body/pdf/Gender-Insights-Report.pdf) indicates that women are less aggressive when it comes to their job search than men. 
Sometimes, you just need to pursue an opportunity with a [growth mindset](https://www.brainpickings.org/2014/01/29/carol-dweck-mindset/), trusting that you will learn what you need to as you go.\n\n\"One thing I would say is don't feel like you need to know everything because nobody knows everything,\" said [Aricka Flowers](/company/team/#atflowers), digital production manager at GitLab. \"Do your best. Work hard and continue to learn, but don't feel like you need to know everything, and that goes for just about any industry, to be honest.\"\n\n*New York Times* journalist [Jessica Bennett](https://www.nytimes.com/by/jessica-bennett) has a great section on impostor syndrome in her book *[Feminist Fight Club](https://www.feministfightclub.com/)*. One of the sections that really stands out is her observations about doubt. If you find yourself succumbing to negative self-talk and starting to freeze up, reframe that self-doubt into idea doubt. While self-doubt will leave you spiraling, idea doubt is motivating. Start iterating on your ideas until it starts to feel more solid. Then, when it comes time to pitch your idea, remember all the hard work that went into it.\n\nIn a [well-liked post on LinkedIn by Sales Foundation CEO Ebony Beckwith](https://www.linkedin.com/feed/update/urn:li:activity:6719331871941103616/), she invokes an insight from Mindy Kaling about impostor syndrome, stating that women of color are less likely to fall victim to impostor syndrome, because they know just how hard they had to work get a seat at the table.\n\n\"While feelings of doubt and fear can still creep in, rarely do thoughts of, 'How did I get here? I don't deserve to be here,'\" said Ebony. \"Because we know exactly how we got here – we did the work! Reminding myself of that hard work is my new cure for impostor syndrome.\"\n\n## Watch and learn\n\nWe hosted two fireside chats with women working in different capacities at GitLab during our Virtual Contribute event. 
We linked to one of the fireside chats below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/qS0kebPUhTo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More on this topic\n\n- [How to tackle impostor syndrome while working remotely](/blog/imposter-syndrome-and-remote-work/)\n- [We're working to empower Minorities in Tech with a new employee resource group (ERG)](/blog/gitlab-empowers-minorities-in-tech-with-erg/)\n- [How diversity, inclusion, and belonging works in the tech industry](/blog/what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry/)\n",[9,810],{"slug":4203,"featured":6,"template":680},"impostorsyndrome-women-in-tech","content:en-us:blog:impostorsyndrome-women-in-tech.yml","Impostorsyndrome Women In Tech","en-us/blog/impostorsyndrome-women-in-tech.yml","en-us/blog/impostorsyndrome-women-in-tech",{"_path":4209,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4210,"content":4215,"config":4221,"_id":4223,"_type":14,"title":4224,"_source":16,"_file":4225,"_stem":4226,"_extension":19},"/en-us/blog/improve-your-gitlab-productivity-with-these-10-tips",{"title":4211,"description":4212,"ogTitle":4211,"ogDescription":4212,"noIndex":6,"ogImage":4174,"ogUrl":4213,"ogSiteName":667,"ogType":668,"canonicalUrls":4213,"schema":4214},"10 tips to make you a productive GitLab user","Learn how quick actions can make you a more efficient GitLab user.","https://about.gitlab.com/blog/improve-your-gitlab-productivity-with-these-10-tips","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 tips to make you a productive GitLab user\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"},{\"@type\":\"Person\",\"name\":\"Roman Kuba\"}],\n        \"datePublished\": \"2021-02-18\",\n      
}",{"title":4211,"description":4212,"authors":4216,"heroImage":4174,"date":1595,"body":4219,"category":743,"tags":4220},[4217,4218],"Michael Friedrich","Roman Kuba","\nMost people know GitLab is a solid tool in today's DevOps workflows, with code reviews, CI/CD, and project management all available for users in a single application. But there are always ways to be more efficient. Since we use GitLab to develop GitLab, everyone has their own habits and hidden gems to speed things up.\n\nWe chatted about GitLab efficiency tips after seeing new [quick actions releases in GitLab 13.8](/releases/2021/01/22/gitlab-13-8-released/#display-all-available-quick-actions-in-autocomplete), and decided to share some of our favorite tips with GitLab users. We share our typical day-to-day workflows as an engineering manager (Roman) and a developer (Michael) to show how quick actions make teams more productive and efficient.\n\n### Roman: Engineering manager starts planning\n\nI am an engineering manager on the [Create: Editor team](/handbook/product/categories/features/#createeditor-group) at GitLab. One of my responsibilities is capacity planning with product managers. Planning happens every month for the next [GitLab release](/releases/). GitLab uses the [milestone feature](https://docs.gitlab.com/ee/user/project/milestones/) to keep everything organized for the release. As planning goes on, I need to create a new issue for a new feature in the Web IDE. The issue description uses a [description template](https://docs.gitlab.com/ee/user/project/description_templates.html) which gets filled with the right context.\n\nBut instead of searching for the assignee in the dropdown, I just add a new line:\n\n```\n/assign @dnsmichi\n```\n\nAll quick actions start with a `/` character and will be interpreted by GitLab when the issue gets created. 
In addition to an assignee, issue labels need to be applied as well.\n\n```\n/label ~\"type::feature\"\n```\n\nYou can also assign multiple labels at once:\n\n```\n/label ~devops::create ~group::editor ~\"Category::Web IDE\"\n```\n\n![GitLab Quick Actions: Multiple labels](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_label_multiple.png)\nHow to apply multiple labels using GitLab quick actions.\n\nThe issue needs to be assigned to the next milestone. This can be done with another quick action:\n\n```\n/milestone %13.10\n```\n\nNote that 13.9 release planning already happened last month. The [product kickoff](/direction/kickoff/) highlights the planned features.\n\nThe keyboard shortcut `cmd + enter` now creates the issue without clicking a button.\n\nSo far, we were able to complete a lot of the necessary workflows around issues in one go, and without ever leaving the text box.\n\nAfter reviewing the issue I created, I remembered that this issue should be assigned to the `FY22Q1 Performance OKRs` epic. Again, we can use a quick action. It’s important to note here that referencing an epic works with the `&` character. When we type this character, we can start to search for the epic by typing its name.\n\n```\n/epic & \u003Csearch>\n```\n\nThis will turn into something like this:\n\n```\n/epic &123\n```\n\nAll quick actions can be used in a new comment and again using `cmd + enter` to save it.\n\nThe `FY22Q1 Performance OKRs` epic still needs to be added to a parent engineering OKR epic. So I'll navigate to the now-linked epic and use another quick action to set the parent epic.\n\n```\n/parent_epic & \u003Csearch>\n```\n\nWhen working with multiple levels of epics, remember to keep practicing quick actions to create visual epic trees quickly. 
That’s all for now from my manager's side.\n\n### Michael: A developer starts with code\n\nI work on the [Developer Evangelism team](/handbook/marketing/developer-relations/developer-evangelism/) at GitLab, and although I'm not technically a developer in the typical sense I still work with code on a daily basis. The average day starts with a new to-do. Today's to-do points me to the new issue that Roman created. After reviewing the issue requirements and defining the changes to be implemented, I start work: I'll clean up the work environment, pull the latest changes from the default branch (main/master), and create a new Git branch in my local terminal.\n\nAfter a few commits, my work day nears its end. I decide to publish the local Git branch and create a new Merge Request (MR). After creating the MR, the triage workflow kicks off. I mark the [MR as draft](https://docs.gitlab.com/ee/user/project/merge_requests/drafts.html) to prevent the workflow from starting before the MR is ready:\n\n```\n/draft\n```\n\nThe next day, I continue working on the MR and finish everything that was planned, so I need to remove the draft designation. The `draft` quick action is a toggle, so I can use it to assign and remove the `Draft` marker.\n\n```\n/draft\n```\n\nThe next step is to assign a reviewer for the MR. GitLab 13.7 added [merge request reviewers](/blog/merge-request-reviewers/), which means we can leave the MR assignee untouched. I'll use the livesearch to assign the right reviewer with a leading `@` character.\n\n```\n/assign_reviewer @ \u003Csearch>\n```\n\n![GitLab Quick Actions: Remove draft and assign reviewer](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_toggle_draft_assign_reviewer.png)\nHow to remove the draft and add a reviewer using GitLab quick actions.\n\nAfter the first round of review, I get feedback and items for follow-up. 
Since I am in the middle of a different tasks, I create a new to-do to remind myself of an open task to follow up on when I'm ready.\n\n```\n/todo\n```\n\nSince my work as a developer evanglist includes many topics and areas, I get distracted with other high priority tasks throughout the day. Later in the week, I'll come back to the MR. The review items have been addressed by team member suggestions and all threads are resolved now. The reviewer approves the MR with the quick action:\n\n```\n/approve\n```\n\nThe review process took a little while to complete, and because GitLab is a fast-changing project, the Git branch is outdated. I need to rebase against the default branch.\n\nBut since I am already working on something else, I do not want to stop what I am doing currently to rebase. Then I remember: GitLab 13.8 added the `/rebase` quick action. This schedules a new background job that attempts to rebase the branch, and stops operations if it fails.\n\nI open the MR and create a new comment. I start typing the rebase quick action, followed by `cmd+enter` to send it:\n\n```\n/rebase\n```\n\n![GitLab Quick Actions: Rebase](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_rebase.png){: .shadow.center}\nHow to rebase with GitLab quick actions.\n{: .note.text-center}\n\nPhew. It worked. The CI/CD pipeline is running, and I believe that the rebase did not break anything. I go to click the \"Merge after pipeline succeeds\" button, and remember there's a quick action for that.\n\n```\n/merge\n```\n\nThe quick action takes into account what is configured for the project: Either merge when the pipeline succeeds or add it to the [Merge Train](/blog/merge-trains-explained/).\n\nEverything happens automatically and I can continue working on other tasks. The manager (in this case, Roman) sees the issue being closed automatically using the `Closes` keyword. 
That's all from my developer's side.\n\nTip: [Automatically closing issues](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) after the MR has been merged is an amazing workflow for everyone, assuming the manager has set the milestone accordingly.\n\nAt GitLab, we have documented our [engineering workflows](/handbook/engineering/workflow/) which can be followed more efficiently with the quick actions shown in this blog post.\n\n### Quick actions + description templates = ❤️\n\nWe demonstrated different ways quick actions can be used to complete common tasks more efficiently. But they do not always have to be applied manually. One shortcut is to just add them to [description templates](https://docs.gitlab.com/ee/user/project/description_templates.html) so you do not have to worry about remembering them all. This way, you can also automatically assign users, add labels, and much more based on the template you apply. Using description templates helps with project contributions and allows everyone to focus on the feature proposal or bug report.\n\nLet’s try it! Create a new project, navigate into \"Issues > Labels\" and generate a default set of labels. Next, open the Web IDE and add a new file in `.gitlab/issue_templates/bug.md`. Add the following content:\n\n```\n# Summary\n\n# Steps to reproduce\n\n1.\n1.\n1.\n\n\u003C!-- Do not edit the section below -->\n/label ~\"type::bug\"\n/assign @YOURUSER\n```\n\nFirst, replace YOURUSER with your username (make sure you're logged in). Commit the new file to the default branch, and navigate into the issue list. Next, create a new issue and select `bug` from the dropdown. Add some content, and submit the issue. Finally, verify that the label and assignee are both set.\n\nTip: This is not limited to issue templates, it also works with MRs and epics. At GitLab we also often use this function to dynamically assign people based on reports created automatically. 
There are many opportunities to use description templates.\n\n### More tips and insights\n\nWe have not yet tried the following quick actions - can you help us out? :-)\n\n```\n/shrug\n/tableflip\n```\n\nThere are more [quick actions](https://docs.gitlab.com/ee/user/project/quick_actions.html) and [keyboard shortcuts](https://docs.gitlab.com/ee/user/shortcuts.html) available. In fact, GitLab user [Gary Bell](https://gitlab.com/garybell) shared great insights on quick actions in his \"Tanuki Tuesday\" blog series:\n\n- [Quick Actions](https://www.garybell.co.uk/quick-actions-in-gitlab/)\n- [Keyboard Shortcuts](https://www.garybell.co.uk/using-keyboard-shortcuts-in-gitlab/)\n\nLet us know in the comments below which quick actions most helped your productivity and if you have other creative ways of using quick actions.\n\nPS: We also support shortcuts at GitLab, and the most loved shortcut is `cmd + k` for inserting a Markdown URL.\n\nCover image by [Juan Gomez](https://unsplash.com/@nosoylasonia) on [Unsplash](https://unsplash.com/photos/kt-wA0GDFq8)\n{: .note}\n",[723,811,9],{"slug":4222,"featured":6,"template":680},"improve-your-gitlab-productivity-with-these-10-tips","content:en-us:blog:improve-your-gitlab-productivity-with-these-10-tips.yml","Improve Your Gitlab Productivity With These 10 Tips","en-us/blog/improve-your-gitlab-productivity-with-these-10-tips.yml","en-us/blog/improve-your-gitlab-productivity-with-these-10-tips",{"_path":4228,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4229,"content":4235,"config":4240,"_id":4242,"_type":14,"title":4243,"_source":16,"_file":4244,"_stem":4245,"_extension":19},"/en-us/blog/incident-management-design-facilitation",{"title":4230,"description":4231,"ogTitle":4230,"ogDescription":4231,"noIndex":6,"ogImage":4232,"ogUrl":4233,"ogSiteName":667,"ogType":668,"canonicalUrls":4233,"schema":4234},"How we used design facilitation to understand incident management","The group responsible for the Monitor stage at GitLab 
recently got together to decide on new product features with a facilitated design session.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678649/Blog/Hero%20Images/incident_management-blog-image.jpg","https://about.gitlab.com/blog/incident-management-design-facilitation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used design facilitation to understand incident management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amelia Bauerly\"}],\n        \"datePublished\": \"2019-03-15\",\n      }",{"title":4230,"description":4231,"authors":4236,"heroImage":4232,"date":4237,"body":4238,"category":808,"tags":4239},[1776],"2019-03-15","\nBefore starting to design a new product feature, it’s useful to get everyone on the same page by asking a few important questions: What is the problem we are trying to solve?\nWho are we solving this problem for?\nWhat are the steps we should take in trying to solve this problem?\n\nAs we work remotely, collaborating on these questions synchronously isn’t generally an option.\n\nRecently, the [Monitor group](/handbook/engineering/development/ops/monitor/) was given the opportunity to gather in Berlin for a Fast Boot.\nWe took advantage of everyone being in the same place and time zone to host a [facilitated design session](https://gitlab.com/gitlab-org/gitlab-ce/issues/55663) on incident management, where we could answer these questions together.\n\n## How the facilitated design session works\n\nThe session involved walking the group through three exercises, each focusing on one of the core questions we needed to solve.\n\n### We tackled problem definition through running a boundary critique exercise\n\nUsing the [1-2-4-All](http://www.liberatingstructures.com/1-1-2-4-all/) technique, we came up with a list of things incident management is and is not.\nSince we had engineers, designers, and product managers all working 
together, we were able to benefit from diverse perspectives and experience levels.\nWe finished the exercise by agreeing on a definition of the space we wanted to work on together.\n\n### Next, we did an exercise to build empathy with our users\n\nWe took our four [ops personas](/handbook/product/personas/), broke into groups, and compiled [empathy map canvases](https://gamestorming.com/wp-content/uploads/2017/07/Empathy-Map-Canvas-006.pdf) for each.\nWe then took our deepened understanding of our assigned users and applied it to an imagined incident.\nWe shared our users’ pain points, concerns, and goals with the group.\n\n### Finally, brainstorming product features\n\nHaving established a scope for our work and a sense of our users’ needs, our final exercise involved brainstorming product features that would fit the requirements we had established.\nWe finished the session with everyone dot-voting on features, which left us with a prioritized list of features to work on as we move forward with this project.\n\nThough working this way isn’t a part of our normal flow, the facilitation was a great chance for us all to engage with a product discovery process together.\nBy tackling these questions as a group, we could all come to alignment on what was needed going forward.\nParticipating in these early stages of planning also generates an extra level of commitment to seeing these features through the development process, since we had all agreed on the necessity for them.\n\nWe will continue to explore how to inject the energy and enthusiasm generated by this process into our normal, asynchronous workflow.\n",[811,677,9],{"slug":4241,"featured":6,"template":680},"incident-management-design-facilitation","content:en-us:blog:incident-management-design-facilitation.yml","Incident Management Design 
Facilitation","en-us/blog/incident-management-design-facilitation.yml","en-us/blog/incident-management-design-facilitation",{"_path":4247,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4248,"content":4254,"config":4258,"_id":4260,"_type":14,"title":4261,"_source":16,"_file":4262,"_stem":4263,"_extension":19},"/en-us/blog/inside-gitlab-security-dashboards",{"title":4249,"description":4250,"ogTitle":4249,"ogDescription":4250,"noIndex":6,"ogImage":4251,"ogUrl":4252,"ogSiteName":667,"ogType":668,"canonicalUrls":4252,"schema":4253},"How can teams secure applications at DevOps speed? Security Dashboards are here to help.","GitLab Security Dashboards enable security professionals to view vulnerabilities across a project. Here’s an inside look.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678710/Blog/Hero%20Images/inside-gitlab-security-dashboards.jpg","https://about.gitlab.com/blog/inside-gitlab-security-dashboards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How can teams secure applications at DevOps speed? Security Dashboards are here to help.\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2018-09-14\",\n      }",{"title":4249,"description":4250,"authors":4255,"heroImage":4251,"date":3016,"body":4256,"category":743,"tags":4257},[2745],"\nBusiness survival today depends on a radically faster DevOps lifecycle, but how can teams secure applications at DevOps speed? It’s a thorny problem for a number of reasons: applications are a prime target for cyber attacks; most [application security](/topics/devsecops/) tools are resource intensive, requiring integration of both technology and processes; and testers face the dilemma of when and how often to test code that is iteratively changed right up until it’s deployed. 
Many are faced with weighing the need to test each iteration against the speed and cost of doing so, while the possibility of a rollback looms in the case of an unforeseen security vulnerability.\n\n>Many are faced with weighing the need to test each iteration against the speed and cost of doing so\n\nWe know that shifting left and discovering vulnerabilities earlier in the development process is important, but it’s tough to find the perfect balance, where teams can be confident they’re truly creating business value and not becoming a business inhibitor. It’s clear that our existing application security tools are colliding with modern development. So what if you could scan all code, every time for development, using fewer tools instead of more, and have developers and operations on the same page instead of adversarial?\n\n### Built-in security products\n\nIt’s going to take a fundamental shift by companies towards proactive security. With security issues reported directly in merge requests, one license cost for integrated security, and zero context-switching to proactively secure applications, we believe GitLab can help get you there.\n\nUsing multiple tools forces developers to switch away from their primary objective of developing code, or requires integrated workflows with security pros. We believe successful tools will add high value while minimizing distraction for engineers. With GitLab, [SAST](https://docs.gitlab.com/ee/user/application_security/sast/), [DAST](https://docs.gitlab.com/ee/user/application_security/dast/), [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/), [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), and [license management](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html) are all built in. 
Because there’s one tool for the software development lifecycle, you can automatically run tests on all code commits, early in the development process.\n\n### Security Dashboard demo\nIn 11.1, [we shipped Security Dashboards](/releases/2018/07/22/gitlab-11-1-released/), to help serve security professionals. Traditionally we’ve focused on the developer, but the Security Dashboard is meant to enable security professionals to view vulnerabilities across a project. Here’s a quick look at our first iteration of the Security Dashboard:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/U2_dqwTRUVk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nKeep an eye out for [improvements](https://gitlab.com/gitlab-org/gitlab-ee/issues/6709), and let us know what you think by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\nCover photo by [Christian EM](https://unsplash.com/photos/J7EUjSlNQtg) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1440,677,9,720],{"slug":4259,"featured":6,"template":680},"inside-gitlab-security-dashboards","content:en-us:blog:inside-gitlab-security-dashboards.yml","Inside Gitlab Security Dashboards","en-us/blog/inside-gitlab-security-dashboards.yml","en-us/blog/inside-gitlab-security-dashboards",{"_path":4265,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4266,"content":4272,"config":4279,"_id":4281,"_type":14,"title":4282,"_source":16,"_file":4283,"_stem":4284,"_extension":19},"/en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features",{"title":4267,"description":4268,"ogTitle":4267,"ogDescription":4268,"noIndex":6,"ogImage":4269,"ogUrl":4270,"ogSiteName":667,"ogType":668,"canonicalUrls":4270,"schema":4271},"Inside look: How GitLab's Test Platform team validates AI features","Learn how we continuously analyze AI feature performance, including testing latency worldwide, and get to know our new AI 
continuous analysis tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099033/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_78Dav6FR9EGjhebHWuBVan_1750099033422.png","https://about.gitlab.com/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside look: How GitLab's Test Platform team validates AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Lapierre\"},{\"@type\":\"Person\",\"name\":\"Vincy Wilson\"}],\n        \"datePublished\": \"2024-06-03\",\n      }",{"title":4267,"description":4268,"authors":4273,"heroImage":4269,"date":4276,"body":4277,"category":1839,"tags":4278},[4274,4275],"Mark Lapierre","Vincy Wilson","2024-06-03","AI is increasingly becoming a centerpiece of software development - many companies are integrating it throughout their DevSecOps workflows to improve productivity and increase efficiency. Because of this now-critical role, AI features should be tested and analyzed on an ongoing basis. In this article, we take you behind the scenes to learn how [GitLab's Test Platform team](https://handbook.gitlab.com/handbook/engineering/infrastructure/test-platform/) does this for [GitLab Duo](https://about.gitlab.com/gitlab-duo/) features by conducting performance validation, functional readiness, and continuous analysis across GitLab versions. With this three-pronged approach, GitLab aims to ensure that GitLab Duo features are performing optimally for our customers.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## AI and testing\n\nAI's non-deterministic nature, where the same input can produce different outputs, makes ensuring a great user experience a challenge. 
So, when we integrated AI deep into the GitLab DevSecOps Platform, we had to adapt our best practices to address this challenge. \n\nThe [Test Platform team's mission ](https://handbook.gitlab.com/handbook/engineering/infrastructure/test-platform/) is to help enable the successful development and deployment of high-quality software applications with continuous analysis and efficiency to help ensure customer satisfaction. The key to achieving this is by delivering tools that help increase standardization, repeatability, and test consistency. \n\nApplying this to GitLab Duo, our AI suite of tools to power DevSecOps workflows, means being able to continuously analyze its performance and identify opportunities for improvement. Our goal is to gain clear, actionable insights that will help us to enhance GitLab Duo's capabilities and, as a result, better meet our customers' needs. \n\n## The need for continuous analysis of AI\n\nTo continuously assess GitLab Duo, we needed a mechanism for analyzing feature performance across releases. Therefore, we created an AI continuous analysis tool to automate the collection and analysis of data to achieve this. \n\n![diagram of how the AI continuous analysis tool works](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099041/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099041503.png)\n\n\u003Ccenter>\u003Ci>How the AI continuous analysis tool works\u003C/i>\u003C/center>\n\n### Building the AI continuous analysis tool\n\nTo gain detailed, user-centric insights, we needed to gather data in the appropriate context – in this case, the integrated development environment (IDE), as it is where most of our users access GitLab Duo. We narrowed this down further by opting for the Visual Studio Code IDE, a popular choice within our community. Once the environment was chosen, we automated entering code prompts and recording the provided suggestions. 
The interactions with the IDE are handled by the [WebdriverIO VSCode service](https://github.com/webdriverio-community/wdio-vscode-service), and CI operations are handled through [GitLab CI/CD](https://docs.gitlab.com/ee/ci/). This automation significantly scaled up data collection and eliminated repetitive tasks for GitLab team members. To start, we have focused on measuring the performance of GitLab Duo Code Suggestions, but plan to expand to other GitLab AI features in the future.\n\n### Analyzing the data\n\nAt the core of our AI continuous analysis tool is a mechanism for collecting and analyzing code suggestions. This involves automatically entering code prompts, recording the suggestions provided, and logging timestamps of relevant events. We measure the time from when the tool provides an input until a suggestion is displayed in the UI. In addition, we record the logs created by the IDE, which report the time it took for each suggestion response to be received. With this data, we can compare the latency of suggestions in terms of how long it takes the backend AI service to send a response to the IDE, and how long it takes for the IDE to display the suggestion for the user. We then can compare latency and other metrics of GitLab Duo features across multiple releases. The GitLab platform has the ability to analyze [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) and [application security](https://docs.gitlab.com/ee/user/application_security/), so we leverage these capabilities to enable the AI continuous analysis tool to analyze the quality and security of the suggestions provided by GitLab Duo.\n\n### Improving AI-driven suggestions\n\nOnce the collected data is analyzed, the tool automatically generates a single report summarizing the results. 
The report includes key statistics (e.g., mean latency and/or latency at various percentiles), descriptions of notable differences or patterns, links to raw data, and CI/CD pipeline logs and artifacts. The tool also records a video of each prompt and suggestion, which allows us to review specific cases where differences are highlighted. This creates an opportunity for the UX researchers and development teams to take action on the insights gained, helping to improve the overall user experience and system performance.\n\nThe tool is at an early stage of development, but it's already helped us to improve the experience for GitLab Duo Code Suggestions users. Moving forward, we plan to expand our tool’s capabilities, incorporate more metrics and consume and provide input to our [Centralized Evaluation Framework](https://about.gitlab.com/direction/ai-powered/ai_model_validation/ai_evaluation/), which validates AI models, to enhance our continuous analysis further.\n\n## Performance validation\n\nAs AI has become integral to GitLab's offerings, optimizing the performance of AI-driven features is essential. Our performance tests aim to evaluate and monitor the performance of our GitLab components, which interact with AI service backends. While we can monitor the performance of these external services as part of our production environment's observability, we cannot control them. Thus, including third-party services in our performance testing would be expensive and yield limited benefits. Although third-party AI providers contribute to overall latency, the latency attributable to GitLab components is still important to check. We aim to detect changes that might lead to performance degradation by monitoring GitLab components. 
\n\n### Building AI performance validation test environment\n\nIn our AI test environments, the [AI Gateway](https://docs.gitlab.com/ee/architecture/blueprints/ai_gateway/#summary), which is a stand-alone service to give access to AI features to GitLab users, has been configured to return mocked responses, enabling us to test the performance of AI-powered features without interacting with third-party AI service providers. We conduct AI performance tests on [reference architecture environments of various sizes](https://docs.gitlab.com/ee/administration/reference_architectures/). Additionally, we evaluate new tests in their own isolated environment before they're added to the larger environments.\n\n### Testing multi-regional latency\n\nMulti-regional latency tests need to be run from various geolocations to validate that requests are being served from a suitable location close to the source of the request. We do this today with the use of the [GitLab Environment Toolkit](https://gitlab.com/gitlab-org/gitlab-environment-toolkit). The toolkit provisions an environment in the identified region to test (note: both the AI Gateway and the provisioned environment are in the same region), then uses the [GitLab Performance Tool](https://gitlab.com/gitlab-org/quality/performance) to run tests to measure time to first byte (TTFB). TTFB is our way of measuring time to the first part of the response being rendered, which contributes to the perceived latency that a customer experiences. To account for this measurement, our tests have a check to help ensure that the [response itself isn't empty](https://gitlab.com/gitlab-org/quality/performance/-/blob/cee8bef023e590e6ca75828e49f5c7c596581e06/k6/tests/experimental/api_v4_code_suggestions_generation_streaming.js#L70). \n\nOur tests are expanding further to continue to measure perceived latency from a customer’s perspective. 
We have captured a set of baseline response times that indicate how a specific set of regions performed when the test environment was in a known good state. These baselines allow us to compare subsequent environment updates and other regions to this known state to evaluate the impact of changes. These baseline measurements can be updated after major updates to ensure they stay relevant in the future. \n\nNote: As of this article's publication date, we have AI Gateway deployments across the U.S., Europe, and Asia. To learn more, visit our [handbook page](https://handbook.gitlab.com/handbook/engineering/development/data-science/ai-powered/ai-framework/#-aigw-region-deployments).\n\n## Functionality\n\nTo help continuously enable customers to confidently leverage AI reliably, we must continuously work to ensure our AI features function as expected.\n\n### Unit and integration tests\n\nFeatures that leverage AI models still require rigorous automated tests, which help engineers develop new features and changes confidently. However, since AI features can involve integrating with third-party AI providers, we must be careful to stub any external API calls to help ensure our tests are fast and reliable.\n\nFor a comprehensive look at testing at GitLab, look at our [testing standards and style guidelines](https://docs.gitlab.com/ee/development/testing_guide/). \n\n### End-to-end tests \n\nEnd-to-end testing is a strategy for checking whether the application works as expected across the entire software stack and architecture. 
We've implemented it in two ways for GitLab Duo testing: using real AI-generated responses and mock-generated AI responses.\n\n![validating features - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099041/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099041504.png)\n\n\u003Ccenter>\u003Ci>End-to-end test workflow\u003C/i>\u003C/center>\n\n#### Using real AI-generated responses\n\nAlthough costly, end-to-end tests are important to help ensure the entire user experience functions as expected. Since AI models are non-deterministic, end-to-end test assertions for validating real AI-generated responses should be loose enough to help ensure the feature functions without relying on a response that may change. This might mean an assertion that checks for some response with no errors or for a response we are certain to receive.\n\nAI-driven functionality is not accessible only from within the GitLab application, so we must also consider user workflows for other applications that leverage these features. For example, to cover the use case of a developer requesting code suggestions in [IntelliJ IDEA](https://www.jetbrains.com/idea/) using the GitLab Duo plugin, we need to drive the IntelliJ application to simulate a user workflow. Similarly, to ensure that the GitLab Duo Chat experience is consistent in VS Code, we must drive the VS Code application and exercise the GitLab Workflow extension. Working to ensure these workflows are covered helps us maintain a consistently great developer experience across all GitLab products. \n\n#### Using mock AI-generated responses\n\nIn addition to end-to-end tests using real AI-generated responses, we run some end-to-end tests against test environments configured to return mock responses. 
This allows us to verify changes to GitLab code and components that don’t depend on responses generated by an AI model more frequently.\n\n> For a closer look at end-to-end testing, read our [end-to-end testing guide](https://docs.gitlab.com/ee/development/testing_guide/end_to_end/). \n\n### Exploratory testing and dogfooding\n\nAI features are built by humans for humans. At GitLab, exploratory testing and dogfooding greatly benefit us. GitLab team members are passionate about what features get shipped, and insights from internal usage are invaluable in shaping the direction of AI features.\n\n[Exploratory testing](https://about.gitlab.com/topics/devops/devops-test-automation/#test-automation-stages) allows the team to creatively exercise features to help ensure edge case bugs are identified and resolved. Dogfooding encourages team members to use AI features in their daily workflows, which helps us identify realistic issues from realistic users. For a comprehensive look at how we dogfood AI features, look at [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/).\n\n## Get started with GitLab Duo\nHopefully this article gives you insight into how we are validating AI features at GitLab. We have integrated our team's process into our overall development as we iterate on GitLab Duo features. 
We encourage you to try GitLab Duo in your organization and reap the benefits of AI-powered workflows.\n\n> Start a [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) today!\n\n_Members of the GitLab Test Platform team contributed to this article._\n",[1299,677,475,9,722,1295],{"slug":4280,"featured":91,"template":680},"inside-look-how-gitlabs-test-platform-team-validates-ai-features","content:en-us:blog:inside-look-how-gitlabs-test-platform-team-validates-ai-features.yml","Inside Look How Gitlabs Test Platform Team Validates Ai Features","en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features.yml","en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features",{"_path":4286,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4287,"content":4293,"config":4299,"_id":4301,"_type":14,"title":4302,"_source":16,"_file":4303,"_stem":4304,"_extension":19},"/en-us/blog/inside-our-new-product-manager-persona",{"title":4288,"description":4289,"ogTitle":4288,"ogDescription":4289,"noIndex":6,"ogImage":4290,"ogUrl":4291,"ogSiteName":667,"ogType":668,"canonicalUrls":4291,"schema":4292},"What do product managers need to do their best work?","Check out some of the findings that led to our new Product Manager Persona.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678857/Blog/Hero%20Images/investigating-how-product-managers-use-gitlab.jpg","https://about.gitlab.com/blog/inside-our-new-product-manager-persona","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What do product managers need to do their best work?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Katherine Okpara\"}],\n        \"datePublished\": \"2018-11-12\",\n      }",{"title":4288,"description":4289,"authors":4294,"heroImage":4290,"date":4296,"body":4297,"category":787,"tags":4298},[4295],"Katherine Okpara","2018-11-12","\nRecently I spoke with 
several product managers and asked them about their experiences, as part of our [effort to create personas](/blog/personas-and-empathy-building/) for every one of GitLab's [product areas](/handbook/product/categories/). I gained a lot of insight through these interviews, including a better understanding of their daily duties, goals and motivations, challenges they face in their roles, and the tools they use throughout the software development lifecycle. Many of the findings have been included in our new [Product Manager Persona, Parker](/handbook/product/personas/), to help our own PMs brainstorm improvements and next steps for GitLab features. You can peruse the highlights and the persona itself below, and let us know what you think by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\n## The research\n\nHere are some of the findings from my [eight interviews](https://gitlab.com/gitlab-org/ux-research/issues/88) conducted for the persona.\n\n### So, what’s the hardest part about being a product manager?\n\nThe product manager persona represents people who are responsible for prioritizing feature requests, product roadmapping, and tracking progress of the development of software applications. Since many of these factors depend on how other team members perform, most challenges related to communication and ensuring that their team delivers on time.\n\n#### Staying updated on team progress and important decisions\n\nIt can be difficult to know the status of certain requirements when other team members do not update the various tools that are being used. Important information can get lost along the way, which often leads to repetitive discussions or fixing incorrect work. Users were looking for ways to have this information readily accessible and consistently communicated throughout their teams.\n\n> \"Getting other people to use the tools. 
I need to make sure that other people are updating the Jira board for example – in my experience, many developers don’t exactly love to do this since it’s a tedious task. Or, if they have a question, adding it in the task so that we can keep a record of everything that’s being worked on. Sometimes someone will send me a question on Slack and I’ll copy-paste that into the ticket since sometimes it’s easier for me to do that than to ask someone to get used to doing that.\"\n\n#### Prioritizing features to build when dealing with limited resources\n\nProduct managers are often responsible for defining and scoping features, incorporating company objectives into the product roadmap, and giving developers and designers the requirements they need to deliver strong features. As a result, product teams often have trouble balancing feature requests with development capacity.\n\n> \"...Being able to find balance between being strategic and being practical. Being able to look into the future and be ambitious while at the same time having to put out fires and manage the day-to-day. Another challenge is staying in touch with the end user. We do not have as much time to be on top of the market and to interview customers. We're not as able to get market feedback and do market research as well...\"\n\n#### Simplifying information for the different stakeholders involved in the product\n\nThe need to give clients and stakeholders timelines and estimates that are accurate but also realistic can be very stressful for a product manager. This is largely due to the fact that a cycle is often unpredictable. 
It can also be challenging to explain why certain features have been delayed or deprioritized, when customers and upper-level management are not working closely with the team.\n\n> \"Some of the challenges of working with the technical team leads is that they will forget to update things or they’ll give me a summary that is super technical so I have to ask more questions to make sure that I understand and have the ability to explain to other product managers where the developers are stuck, because they need more definition on what that feature should look like.\"\n\n### What motivates a product manager?\n\nProduct managers generally are motivated by the desire to deliver high-quality features in a timely manner. When company objectives shift, they want to have a standard process for communication, so that they can be in sync with all team members. They need to see an overview of all the relevant information related to a feature or product, so that they can monitor progress throughout a cycle. Additionally, they want to be able to help their teams accomplish more of their goals over time.\n\n### What’s the best part about being a product manager?\n\nAll in all, the interviewees all expressed the joy they receive from simply doing their jobs, whether that’s improving life for users or speeding up processes within the company. 
The best part of being a product manager is the opportunity to bring a concept to life and solve real problems for their users.\n\n## The persona\n\n![Parker, Product Manager persona](https://about.gitlab.com/images/blogimages/product-manager-persona.png){: .shadow.center}\n\nKeep an eye out for the rest of our series on the [new personas](/handbook/product/personas/)!\n\n[Photo](https://unsplash.com/photos/YiRQIglwYig) by [Hello I'm Nik](https://unsplash.com/@helloimnik) on Unsplash\n{: .note}\n",[722,9,723],{"slug":4300,"featured":6,"template":680},"inside-our-new-product-manager-persona","content:en-us:blog:inside-our-new-product-manager-persona.yml","Inside Our New Product Manager Persona","en-us/blog/inside-our-new-product-manager-persona.yml","en-us/blog/inside-our-new-product-manager-persona",{"_path":4306,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4307,"content":4313,"config":4318,"_id":4319,"_type":14,"title":4320,"_source":16,"_file":4321,"_stem":4322,"_extension":19},"/en-us/blog/insights",{"title":4308,"description":4309,"ogTitle":4308,"ogDescription":4309,"noIndex":6,"ogImage":4310,"ogUrl":4311,"ogSiteName":667,"ogType":668,"canonicalUrls":4311,"schema":4312},"We're dogfooding a tool to help visualize high-level trends in GitLab projects","How our easy to configure Insights technology takes data from issues and merge requests to build visually appealing charts.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681053/Blog/Hero%20Images/birdseyeview.jpg","https://about.gitlab.com/blog/insights","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're dogfooding a tool to help visualize high-level trends in GitLab projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-01-30\",\n      
}",{"title":4308,"description":4309,"authors":4314,"heroImage":4310,"date":4315,"body":4316,"category":743,"tags":4317},[672],"2020-01-30","\n\nOur policy at GitLab is to [dogfood everything](/handbook/engineering/development/principles/#dogfooding) – meaning we aren't going to introduce a new product or feature to our [DevOps platform](/solutions/devops-platform/) before our engineering team tests it out. Sometimes though, the development process happens in reverse: The product and engineering teams need a specific tool or functionality to help us run GitLab better and discover a tool that has the capacity to solve many different customer use cases.\n\n[Insights](https://docs.gitlab.com/ee/user/project/insights/), which is available to [GitLab Ultimate](/pricing/ultimate/) users, is an example of such a tool. Insights is a flexible feature of GitLab that allows our users to visualize different trends in workflows, bugs, merge request (MR) throughput, and issue activity that is based upon the underlying labeling system of a group. In this blog post, we'll go in-depth on how and why we built this tool, how we use the tool at GitLab, and explain how to configure Insights for your own projects.\n\n\n- [Why we built Insights](#why-we-built-insights)\n- [Labels power Insights](#why-label-hygiene-matters)\n- [How to configure Insights](#configuring-your-insights-dashboard)\n- [How GitLab uses Insights](#how-we-are-dogfooding-insights)\n- [Implementing Insights in your instance](#implementing-insights-for-your-team)\n\n[Kyle Wiebers](/company/team/#kwiebers), quality engineering manager on Engineering Productivity, gives an overview of how we use Insights at GitLab in the GitLab Unfiltered video embedded below. 
Watch the video and read the rest of the post to learn all about this exciting new tool we're dogfooding at GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/kKnQzS9qorc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Why we built Insights\n\nThe [Engineering Productivity team](/handbook/engineering/quality/#engineering-productivity) at GitLab first built Insights to provide an overview of trends in the issue tracker, but soon realized that this technology can be applied in different ways that were beneficial to our needs, and the needs of our users.\n\n\"The initial thing was we were interested in when the bugs were being raised: Were they being raised around release time or were they being raised in the middle of a phase?\" says [Mark Fletcher](/company/team/#markglenfletcher), backend engineer on Engineering Productivity. \"Because we did have bugs being created just after release, which led to regressions, which led to patch fixes. So we were just interested in exploring those kinds of trends.\"\n\nTo capture this trend data the Quality Engineering team created the [quality dashboard](https://quality-dashboard.gitlap.com/groups/gitlab-org), which was essentially the first iteration of Insights for GitLab. While the quality dashboard showed trends in bugs being raised per release cycle, it also showed how much work was being accomplished over the same period.\n\n\"And that's where the scope really changed from looking at issues that are bugs to merge requests and being able to have generic rules based on labels that we can use to align with our workflow,\" says Kyle.\n\n## Why label hygiene matters\n\nThe Engineering Productivity team soon realized that a lot of the different trends they were aiming to capture with Insights were powered by [labels](https://docs.gitlab.com/ee/user/project/labels.html#overview). 
Labels allow a GitLab user to categorize epics, issues, and merge requests with descriptive titles such as \"bug\" or \"feature request\" and quickly filter based upon category. The label filtering system works inside the [issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&first_page_size=100), and all throughout GitLab, and is a core part of the underlying configuration of Insights.\n\nA good example of an Insights dashboard that is configured by labels and the metadata that underlies issues and merge requests (such as creation date) is the [MR throughputs dashboard](https://gitlab.com/groups/gitlab-org/-/insights/#/throughputs).\n\n![Merge request throughputs for group](https://about.gitlab.com/images/blogimages/merge_throughputs_group.png){: .shadow.medium.center}\nA screenshot of the chart for merge request throughputs at the group level.\n{: .note.text-center}\n\nThe MR throughputs dashboard captures how many MRs are completed during a given week or month to measure our organization's overall performance. It is part of our workflow to assign labels to MRs that help distinguish the type of MR being worked on: feature, bug, community contribution, security, or backstage. This dashboard is configured as a stacked bar chart, which makes it easy to visualize MR throughput by type so we can see the type of work being created over a fixed period of time. The chart is also divided into weekly or monthly views, which helps us see both short- and long-term trends.\n\n\"So, we can look at short-term trends and longer-term trends to see: Are we delivering more work? Are we hitting a bottleneck? Are we plateauing? And that allows us to dive a little bit deeper and take corrective action,\" says Kyle.\n\n### Labels help simplify the configuration of dashboards\n\nIf you look to the lefthand sidebar of the MR throughputs dashboard, you'll notice that the dashboard is configured at the Gitlab-org group level. 
The group level of GitLab-org contains all of the projects within GitLab-org and therefore captures all of the MR throughput data across all projects.\n\nThe project level is a level below the group level and looks at a specific project contained within a larger group, such as the GitLab project in the GitLab-org group.\n\n![Merge request throughputs for project](https://about.gitlab.com/images/blogimages/mr_throughputs_product.png){: .shadow.medium.center}\nA screenshot of the chart for merge request throughputs at the project level.\n{: .note.text-center}\n\nAny Insights dashboard, including the MR throughputs dashboard, can be filtered at the group level or the project level, but the configuration remains the same regardless of how the dashboard is filtered.\n\n\"So everything that's contained within a group, and in our case, it would be the GitLab-org group, you can also have this on a project level,\" says Kyle. \"So if you want to look at Insights on a project, you can configure the same thing on a project. Just for our use case, it made sense to look at MR throughputs across multiple projects versus one specific project.\"\n\nBut in the end, it all comes back to labels. We don't have to configure the Insights dashboard differently for groups and projects because all of our labels at GitLab are set up at the group level and then propagate down to the project level.\n\nOne of the characteristics of Insights that makes it such a valuable feature is that the configuration is so flexible. While most customers will use the same labeling system across groups and projects as GitLab does, it is possible to configure the charts separately at the project and group level.\n\n\"The scope [of Insights] changed from looking at issues that are bugs to merge requests and being able to have generic rules based on labels that we can use to align with our workflow,\" says Kyle. 
\"Then that flexibility allows any customers to leverage the same feature based on their own specific workflow or labeling practices.\"\n\nA user can use Insights on a group or project regardless of the underlying labeling system. They just need to configure the dashboard according to their workflow.\n\n## Configuring your Insights dashboard\n\nThere are numerous Insights dashboards that are available out of the box or that can be [easily configured](https://docs.gitlab.com/ee/user/project/insights/#configure-your-insights) based on a user's labeling workflow.\n\nAll of the Insights dashboards within GitLab are [driven by a YAML file](https://gitlab.com/gitlab-org/quality/insights-config/-/blob/master/.gitlab/insights.yml). The configuration for each chart includes configuration parameters: title, type, and query.\n\nThe query section defines the type of issues and/or merge requests from the issue tracker that will be included in the chart. The [parameters for which labels are contained in the chart](https://docs.gitlab.com/ee/user/project/insights/#queryfilter_labels) fall under the query section as well.\n\n\"The Insights configuration is actually stored in [one of your project's repositories]. So, it can be changed just like you do any of your code. It can be [version-controlled](/topics/version-control/) so you can see changes over time. 
That gives you a lot of value to just ensure that there's very clear traceability into why was this dashboard changed, and when was it changed,\" says Kyle.\n\nHere is the configuration that underlies the [MR throughputs dashboard](https://gitlab.com/groups/gitlab-org/-/insights/#/throughputs) we looked at extensively in the section above.\n\n```\nthroughputs:\n  title: Merge Request Throughputs (product only projects)\n  charts:\n    - title: Throughputs per Week\n      type: stacked-bar\n      query:\n        issuable_type: merge_request\n        issuable_state: merged\n        collection_labels:\n          - Community contribution\n          - security\n          - bug\n          - feature\n          - backstage\n        group_by: week\n        period_limit: 12\n    - title: Throughputs per Month\n      type: stacked-bar\n      query:\n        issuable_type: merge_request\n        issuable_state: merged\n        collection_labels:\n          - Community contribution\n          - security\n          - bug\n          - feature\n          - backstage\n        group_by: month\n        period_limit: 24\n```\n{: .language-ruby}\n\nExplore the [Insights YAML file for GitLab](https://gitlab.com/gitlab-org/gitlab-insights/blob/master/.gitlab/insights.yml) to see how we set up some of our other charts.\n\n## How we are dogfooding Insights\n\nInsights is most effective at monitoring high-level trends, as well as measuring performance against a specific measurable objective with the aim of taking corrective action. At GitLab, we've been using our Insights technology in different ways to visualize our overall performance or to answer specific questions.\n\nOur Support and Quality Engineering teams at GitLab currently use Insights, but in different ways. 
By dogfooding the technology here at GitLab, we've found numerous use cases for Insights that could be valuable to our customers.\n\n### How our Support team uses Insights\n\nThe Support team uses Insights both as an out of the box issue tracking dashboard and as a customized dashboard made possible using automation.\n\n#### Bugs SLO chart\n\nThe [Bugs SLO dashboard](https://gitlab.com/gitlab-org/gitlab/insights/#/bugsPastSLO) was created so the Support department and engineering leaders can identify bugs overdue from SLO.\n\n![Support team Bugs SLO chart](https://about.gitlab.com/images/blogimages/bugs_slo.png){: .shadow.medium.center}\nA chart specially configured for the Support team to show how many bugs missed the SLO each month.\n{: .note.text-center}\n\nThe Bugs SLO chart is configured in the GitLab-org group but lives in the GitLab project. The chart pulls open issues pertaining to bugs and customer bugs, that are labeled `missed-SLO` and groups them by month. We also have a [labeling system for categorizing based on priority](https://docs.gitlab.com/ee/development/labels/index.html#priority-labels) – P1 bugs are top priority, P2 bugs are second priority.\n\n\"This really allows us to, again, look at the trends: Are we improving? Are we getting worse? 
Do we need to look a little bit deeper here and do a corrective action to help address any problems that we see within the trends that Insights provides?\" says Kyle.\n\n#### Configuration of SLO chart\n\nHere is a peek at what happens inside the YAML file to configure the bugs SLO chart.\n\n```\nbugsPastSLO:\n  title: Bugs Past SLO\n  charts:\n    - title: Open bugs past priority SLO by creation month\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - bug\n          - missed-SLO\n        collection_labels:\n          - P1\n          - P2\n        group_by: month\n        period_limit: 24\n    - title: Open customer bugs past priority SLO by creation month\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - bug\n          - missed-SLO\n          - customer\n        collection_labels:\n          - P1\n          - P2\n        group_by: month\n        period_limit: 24\n```\n{: .language-ruby}\n\n#### Triage helps ensure good label hygiene\n\nFor the Bugs SLO chart, we use the [GitLab triage project](https://gitlab.com/gitlab-org/gitlab-triage) to [automatically apply the `missed-SLO` label to open issues with priority labels that miss the SLO target](/handbook/engineering/quality/triage-operations/#missed-slo). We use automation here because the GitLab project is so massive, it would not be feasible to manually apply this label based upon the missed SLO target rules. 
Insights is flexible enough that either manual labeling or automation can be used on any dashboard.\n\n### Support issue tracker\n\nThe Support team used one of our out of the box dashboards to [see how many Support issues are open and closed per month](https://gitlab.com/gitlab-com/support-forum/insights/#/issues) with the [GitLab.com Support Tracker project](https://gitlab.com/gitlab-com/support-forum), which looks at support issues raised by GitLab.com users that don't go through the Support team.\n\n![Support issue tracker](https://about.gitlab.com/images/blogimages/support_issue_tracker.png){: .shadow.medium.center}\nThe Support team also uses one of our out of the box dashboards that tracks the number of issues open and closed each month.\n{: .note.text-center}\n\n\"This shows that [the dashboard] is quite useful out of the box to just see some visualizations without doing any configuration,\" says Mark. \"These were the charts that we thought would give the most value to a team or to a project without doing any config whatsoever.\"\n\n## How our Quality Engineering team uses Insights\n\nThe Quality Engineering team uses Insights to look at opportunities to remedy gaps in a specific project in our EE, as well as to visualize flaky tests on GitLab based on reported issues.\n\n### Enterprise Edition testcases chart\n\nOne of our more specific use cases is the Enterprise testcases chart. The Quality Engineering department is working to close the gap in testcases in the GitLab Enterprise. 
The team [configured a chart](https://gitlab.com/gitlab-org/quality/testcases/insights/#/eeTestcasesCharts) within the [testcases project](https://gitlab.com/gitlab-org/quality/testcases/tree/master) to help visualize how many open and closed test gaps there are, separated by GitLab product area, and GitLab product tier.\n\n![EE testcases chart](https://about.gitlab.com/images/blogimages/EE_testcases.png){: .shadow.medium.center}\nQuality Engineering configured this chart to visualize gaps in testcases on GitLab Enterprise.\n{: .note.text-center}\n\n\"Looking at this chart, we may say, ‘Maybe we should have a few people focus on the gaps in verify because it has the most open testcases at the current point',\" says Kyle.\n\n#### Configuration of EE testcases chart\n\nThe EE testcases chart is not something that is available out of the box, but the [configuration for the chart](https://gitlab.com/gitlab-org/quality/testcases/blob/master/.gitlab/insights.yml) is pretty simple nonetheless.\n\n```\neeTestcasesCharts:\n  title: 'Charts for EE Testcases'\n  charts:\n    - title: Open testcases (backlog) by stage\n      type: bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - \"Quality:EE test gaps\"\n        collection_labels:\n          - \"devops::configure\"\n          - \"devops::create\"\n          - \"devops::protect\"\n          - \"devops::enablement\"\n          - \"devops::growth\"\n          - \"devops::manage\"\n          - \"devops::monitor\"\n          - \"devops::package\"\n          - \"devops::plan\"\n          - \"devops::release\"\n          - \"devops::secure\"\n          - \"devops::verify\"\n```\n{: .language-ruby}\n\nThe configuration shows that this is a bar chart that is looking at open issues with the filter `Quality:EE test gaps`. The collection labels are what broke the bars out into different columns. 
While it is possible to illustrate the data in very intricate ways, the underlying schema to configure the chart is actually quite simple, mirroring the process of searching the issue tracker by filtering based on labels.\n\n![Issue tracker](https://about.gitlab.com/images/blogimages/issue_tracker_EE.png){: .shadow.medium.center}\nThe issues represented in the EE testcases chart can be searched for by label using the issue tracker in the testcases project.\n{: .note.text-center}\n\nOpening the issue tracker for the testcases project, you can search by `Quality:EE test gaps` label, select open issues, to see the actual issues represented by the Insights chart.\n\nThe key takeaway: If your team has good label hygiene and a logical workflow, building charts based on Insights should not be particularly challenging.\n\n### End-to-end transient failures\n\nThe Quality Engineering team monitors how often we have reports of flaky tests in our pipeline by looking at the number of issues created that fit the label schema.\n\n![End-to-end transient failure chart](https://about.gitlab.com/images/blogimages/end_to_end_chart.png){: .shadow.medium.center}\nA second chart configured for Quality Engineering is the end-to-end transient failure chart, which looks at flaky tests.\n{: .note.text-center}\n\nSimilar to many of our other charts, this is a stacked bar graph that looks at both open and closed issues on a weekly basis, and the underlying configuration is as you might expect.\n\n```\ntransientFailures:\n  title: End to end transient failures\n  charts:\n    - title: Opened transient failures per week\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - \"Quality\"\n          - \"QA\"\n          - \"bug\"\n        collection_labels:\n          - \"found:gitlab.com\"\n          - \"found:canary.gitlab.com\"\n          - \"found:staging.gitlab.com\"\n          - \"found:staging-orchestrated\"\n  
        - \"found:dev.gitlab.com\"\n          - \"found:nightly\"\n          - \"found:in MR\"\n        group_by: week\n        period_limit: 24\n    - title: Closed transient failures per week\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: closed\n        filter_labels:\n          - \"Quality\"\n          - \"QA\"\n          - \"bug\"\n        collection_labels:\n          - \"found:gitlab.com\"\n          - \"found:canary.gitlab.com\"\n          - \"found:staging.gitlab.com\"\n          - \"found:staging-orchestrated\"\n          - \"found:dev.gitlab.com\"\n          - \"found:nightly\"\n          - \"found:in MR\"\n        group_by: week\n        period_limit: 24\n```\n{: .language-ruby}\n\n## Implementing Insights for your team\n\nIf your team is often pulling data from GitLab through an API or CSV export, and then building charts based on issues and merge request data, then Insights will make your life a lot easier!\n\nSome questions to think about before implementing Insights include: How would you want to categorize the work being done and the issues that are being created? How do you want to monitor the open/close rates on your issues? Also, how do you plan on using labels?\n\nInsights users really need to define their workflows and have a clear idea about how they're using labels. We recommend having some sort of [automated mechanism to ensure good label hygiene](/handbook/engineering/quality/triage-operations/#triage-automation). 
[GitLab Triage](https://gitlab.com/gitlab-org/gitlab-triage) is our open source project that automates labeling of issues on our giant GitLab project and is a good candidate for any organization that has a large backlog of issues.\n\nWe recommend users [read up more on the issues workflow](https://docs.gitlab.com/ee/development/contributing/issue_workflow.html) to learn more about how to use labels and the issue tracker, which is valuable background knowledge to improve your use of Insights.\n\nWe've been dogfooding Insights for a time to help iron out any wrinkles in the implementation or application of this technology, but we also want to hear your ideas of how we can make improvements to Insights. [Create an issue in the GitLab project issue tracker](https://gitlab.com/gitlab-org/gitlab/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=insights) with the Insights label to share your feedback with us!\n\nCover photo by [Aaron Burden](https://unsplash.com/@aaronburden) on [Unsplash](https://unsplash.com/photos/Qy-CBKUg_X8).\n{: .note.text-center}\n",[677,1440,9],{"slug":787,"featured":6,"template":680},"content:en-us:blog:insights.yml","Insights","en-us/blog/insights.yml","en-us/blog/insights",{"_path":4324,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4325,"content":4331,"config":4337,"_id":4339,"_type":14,"title":4340,"_source":16,"_file":4341,"_stem":4342,"_extension":19},"/en-us/blog/international-womens-day-gitlab-initiatives",{"title":4326,"description":4327,"ogTitle":4326,"ogDescription":4327,"noIndex":6,"ogImage":4328,"ogUrl":4329,"ogSiteName":667,"ogType":668,"canonicalUrls":4329,"schema":4330},"Happy International Women’s Day! 
How we’re working to inspire and educate women in STEM","We're shining a light on some of the initiatives we're proud to support, helping us to give back and foster a global community of women in technology.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680483/Blog/Hero%20Images/international-womens-day.jpg","https://about.gitlab.com/blog/international-womens-day-gitlab-initiatives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Happy International Women’s Day! How we’re working to inspire and educate women in STEM\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stephanie Garza\"}],\n        \"datePublished\": \"2019-03-08\",\n      }",{"title":4326,"description":4327,"authors":4332,"heroImage":4328,"date":4334,"body":4335,"category":808,"tags":4336},[4333],"Stephanie Garza","2019-03-08","\n\nAs one of our six [core values](https://handbook.gitlab.com/handbook/values/), diversity is more than just a single project or initiative for GitLab.\nIt’s crucial for the success of our globally distributed team, and for the future of the tech industry as a whole.\nGitLab aims to make a significant impact in efforts to foster an environment where everyone can thrive.\nWe have designed a multidimensional approach to ensure we uphold a culture which embodies transparency, opportunity, and open communication.\n\nAs we celebrate [International Women’s Day](https://www.internationalwomensday.com/) today, we’re taking a moment to reflect on the progress so far, while recognizing there’s lots of work to be done to [#BalanceforBetter](https://www.internationalwomensday.com/Theme).\n\n## We're on a mission to support organizations where women thrive\n\nWe hope to shine a light on some of the initiatives we’re passionate about to help build awareness and encourage others to get involved.\n\n### Free workshops with Django Girls, Girls Get Geeky, and Rails Girls\n\nDjango 
Girls and GitLab partner to provide women free code workshops across the globe.\n[Django Girls](https://djangogirls.org/) (DG) strives to empower women to pursue careers in technology.\nThe free workshops equip women with a solid coding curriculum to kick start their professional journey.\nAlong with DG, we partner with [Girls Get Geeky](https://girlsgetgeeky.com/) and [Rails Girls](http://railsgirls.com/), organizations created to inspire and educate young women in tech.\nThe free workshops provide community, networking, and coding lessons to women of various backgrounds. The women share their goals, dreams (and delicious treats), which GitLab happily supports.\n\n### GitLab Diversity Sponsorship\n\nThrough the [GitLab Diversity Sponsorship program](/community/sponsorship/), we are able to contribute financially to the initiatives.\nThe goal is to foster a community of organizations with the desire to inspire, encourage, and empower women.\nWe have had the pleasure of partnering with [Wonder Women in Tech](https://wonderwomentech.com/), [FemPower](https://www.fempowerafrica.com.ng/), [Women Who Code](https://www.womenwhocode.com/), and [Women Hack](https://womenhack.com/events/), other incredible female-focused powerhouses in the industry.\nThe collaborations allow GitLab to connect on a greater scale with amazing women around the world. 
Visit our [Sponsorships page](/community/sponsorship/) to find out more and to apply.\n\nThe greater GitLab team is actively striving to impact change, raise awareness, and fully support global initiatives.\nWe came together at a recent summit to promote the [STEM Gems](https://stemgemsbook.com/), the foundation devoted to giving girls role models in Science, Technology, Engineering, and Mathematics (STEM).\n[GitLab team-members came together to share their stories](/blog/stem-gems-give-girls-role-models/) in hopes of inspiring women to pursue STEM.\nThese collaborations allow GitLab to connect on a greater scale with amazing women around the world. We hope to inspire the community to join us in our pursuit to provide opportunity. Visit the organizations' websites to learn more about contributing through volunteering, mentoring, or sponsoring. \n\n## Our goals for 2019 are even more aggressive\n\nWith the development of the [GitLab Mentorship program](https://gitlab.com/gitlab-com/people-ops/General/issues/178) we hope to inspire and motivate women from across the globe.\nThe goal is to contribute to the development of a better trained and engaged community.\nOur first round of applications is already in and, once paired, mentors will help mentees learn the ropes at the company, develop relationships across the organization, and identify skills to work on developing.\nThe next cohort of mentee applications will open in August.\n\nEducation and encouragement play a vital role in women’s pursuits.\nIn a typically male-dominated field, it’s important for women to come together and support, mentor, and encourage one another.\nThe women of GitLab embody this belief, taking on various projects, workshops, and volunteer opportunities.\nWe rally together to connect our distributed team.\n\nOf course, these initiatives are just a drop in the ocean.\nUniting our team through charity, sponsorship, and mentoring is one stride toward making global change.\nNarrowing the gender 
gap will remain a constant goal.\nWe aim to provide all GitLab team-members with the opportunity to thrive, contribute, and succeed.\nA balanced and inclusive team will accelerate our potential. [#BalanceforBetter](https://twitter.com/search?q=balance+for+better)\n\nPhoto by [Şahin Yeşilyaprak](https://unsplash.com/photos/SNm9Re4pL9M?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/hot-air-balloon?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText  )\n{: .note}\n",[267,9],{"slug":4338,"featured":6,"template":680},"international-womens-day-gitlab-initiatives","content:en-us:blog:international-womens-day-gitlab-initiatives.yml","International Womens Day Gitlab Initiatives","en-us/blog/international-womens-day-gitlab-initiatives.yml","en-us/blog/international-womens-day-gitlab-initiatives",{"_path":4344,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4345,"content":4351,"config":4357,"_id":4359,"_type":14,"title":4360,"_source":16,"_file":4361,"_stem":4362,"_extension":19},"/en-us/blog/introducing-gitlab-dedicated",{"title":4346,"description":4347,"ogTitle":4346,"ogDescription":4347,"noIndex":6,"ogImage":4348,"ogUrl":4349,"ogSiteName":667,"ogType":668,"canonicalUrls":4349,"schema":4350},"Introducing GitLab Dedicated, our new single-tenant SaaS offering","Learn more about this offering, why we developed it and how customers can be added to our limited availability waitlist.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682524/Blog/Hero%20Images/screenshot-2022-11-30-at-7.49.51-am.png","https://about.gitlab.com/blog/introducing-gitlab-dedicated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing GitLab Dedicated, our new single-tenant SaaS offering\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        
\"datePublished\": \"2022-11-30\",\n      }",{"title":4346,"description":4347,"authors":4352,"heroImage":4348,"date":4354,"body":4355,"category":675,"tags":4356},[4353],"David DeSanto, Chief Product Officer, GitLab","2022-11-30","\n\nToday, we are excited to officially announce the limited availability of GitLab Dedicated, a new way to use our enterprise DevSecOps platform as a single-tenant SaaS offering. This new offering provides all of the benefits of an enterprise DevSecOps platform, with an added focus on data residency, isolation, and private networking to meet compliance needs. \n\n## Navigating compliance complexities\n\nAt GitLab, we serve a wide variety of customers — from small start-ups and community organizations to the largest global enterprises — and we know that no single deployment model will serve the needs of all of our customers.\n\nGitLab customers have told us they need a SaaS offering that provides additional deployment control and data residency to meet stringent compliance requirements. We see this need with large enterprises and companies in regulated industries that are coming under increased scrutiny, facing global internet policy fragmentation, and are dealing with the expanding complexity of data governance. The need to be compliant and secure has never been greater.\n\nEven non-regulated organizations find compliance a real threat to productivity and profits. In our [2022 Global DevSecOps Survey](/developer-survey/), we found that operations professionals are increasingly responsible for all compliance, and a majority of them spend between one-quarter and one-half of their work week managing compliance and audits. That’s a 15% increase from 2021. \n\n## When multi-tenant SaaS is not an option\n\nIn many organizations, the cloud is a substantive way to consume enterprise applications without the overhead of self-hosting. 
But for some industries, the multi-tenant nature of cloud-based SaaS services makes it an impossible choice due to regulatory restrictions. Some organizations need more choice between how they manage their data and where that data sits, and these decisions shouldn't have to come at the expense of efficiency and productivity. \n\n## Balance compliance with speed and efficiency with single-tenant SaaS\n\nI’m excited that we are offering a new deployment option by making our DevSecOps platform available as a single-tenant SaaS solution. GitLab Dedicated provides all of the benefits of an enterprise DevSecOps platform with a focus on data residency, isolation, and private networking to meet compliance needs. With GitLab Dedicated, organizations can leverage the efficiency of the cloud while still getting a completely isolated instance — without the need to deploy and manage a DevSecOps platform and cloud infrastructure themselves.\n\n### Data residency and protection\n\nGitLab Dedicated enables organizations to respond to the increasing number of countries and regions that are establishing unique data residency rules. By choosing the [cloud region that works for them and their regional requirements](https://docs.gitlab.com/ee/subscriptions/gitlab_dedicated/#aws-regions-not-supported), organizations can keep their data local to meet data isolation and residency requirements. It’s an efficient way to stay compliant and performant without the overhead of self-hosting.  \n\nTo further protect customer data, GitLab Dedicated supports a secure, private connection between the organization’s network and our service. This means that users, data, and services have secure access to the isolated instance without exposing services directly to the internet.\n\n### Managed and hosted by GitLab\n\nGitLab Dedicated is not only single-tenant, region-based, and privately connected, but it’s also managed and hosted by GitLab and deployed in the customer’s cloud region of choice. 
Organizations can quickly realize the value of a DevSecOps platform without requiring staff to build out and manage infrastructure. Organizations get all of the benefits of GitLab — shorter cycle times, lower costs, stronger security and more productive developers — with lower total cost of ownership and quicker time to value than hosting themselves.\n\n## Join the waitlist\n\nI’m truly excited to announce limited availability of GitLab Dedicated, which will bring more flexibility and greater choice to enterprise customers and organizations in highly regulated industries that have complex compliance and data residency requirements. The offering provides the efficiencies of the cloud, but with infrastructure-level isolation and data residency controls. \n\n**As we scale this new offering, we are making GitLab Dedicated available by inviting customers to join our waitlist. You can learn more and join the waitlist [on our website](/dedicated/) and get more information about the direction of the offering and [the timeline to General Availability](/direction/saas-platforms/dedicated/).**\n",[1440,9,1342],{"slug":4358,"featured":6,"template":680},"introducing-gitlab-dedicated","content:en-us:blog:introducing-gitlab-dedicated.yml","Introducing Gitlab Dedicated","en-us/blog/introducing-gitlab-dedicated.yml","en-us/blog/introducing-gitlab-dedicated",{"_path":4364,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4365,"content":4370,"config":4376,"_id":4378,"_type":14,"title":4379,"_source":16,"_file":4380,"_stem":4381,"_extension":19},"/en-us/blog/introducing-gitlab-s-integrated-development-environment",{"title":4366,"description":4367,"ogTitle":4366,"ogDescription":4367,"noIndex":6,"ogImage":1549,"ogUrl":4368,"ogSiteName":667,"ogType":668,"canonicalUrls":4368,"schema":4369},"Meet the GitLab Web IDE","Here's how we went from a proof of concept to a new feature that makes it even easier for everyone to edit inside of 
GitLab.","https://about.gitlab.com/blog/introducing-gitlab-s-integrated-development-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet the GitLab Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dimitrie Hoekstra\"}],\n        \"datePublished\": \"2018-06-15\",\n      }",{"title":4366,"description":4367,"authors":4371,"heroImage":1549,"date":4373,"body":4374,"category":743,"tags":4375},[4372],"Dimitrie Hoekstra","2018-06-15","\n\nGitLab has been doing much more for the application development workflow than just source code management and versioning for a while – now spanning everything from [portfolio management](https://docs.gitlab.com/ee/user/group/epics/index.html#epics) to the [entire DevOps lifecycle](/blog/from-dev-to-devops/). Having everyone work from and be familiar with the same interface has many advantages.\n\nAll that code that gets automatically tested and deployed to production has a human at its source though. With the speed of innovation in today’s web development, we saw a chance to help out both new as well as seasoned developers with writing, reviewing, and committing that code with more confidence. In [GitLab 10.7](/releases/2018/04/22/gitlab-10-7-released/) we released the first iteration of our Web IDE – here's how it happened.\n\n## From experiment towards product\n\nThe original idea came from staff developer [Jacob Schatz](/company/team/#jakecodes), who observed how non-developers were having a hard time editing multiple files and getting those changes committed.\n\nAlthough having discussed implementing an Integrated Development Environment (IDE) into GitLab with our CEO [Sid](/company/team/#sytses) and VP of Product [Job](/company/team/#Jobvo) before, it was never clear how to do that and what exact problems it would solve.\n\nAt some point, it dawned on us that the repository view might be the right vessel. 
Jacob set up a proof of concept where he made our file viewer work in the context of a file editor. It removed the page refresh when switching between files and it approached editing from a branch perspective instead of per file. The result was the beginning of the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/), although it was called the \"repo editor\" at that time.\n\n![Proof of concept multi-file editor](https://about.gitlab.com/images/blogimages/webide/multifileeditor.png){: .shadow.medium.center}\n\nSetting up that proof of concept was a [tremendous amount of work](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12198) and was time-boxed to one month. Jacob also had other responsibilities, and there was still a long way to go from concept to minimal viable product (MVP).\n\nProduct, UX, and other developers got involved to see if this could be pushed towards production. The concept solved a problem, but did it align with our vision? How could we holistically integrate this and make it a great experience? How could we get it to perform well for many different users?\n\n## The next phase\n\nIt took some time, but it was clear that we were aiming for a real integrated development experience, accessible for everyone right within the GitLab UI, without anything to install. The idea grew from the \"Repo editor\" into that of the \"Web IDE.\"\n\nGitLab itself is open source (or rather [open core](/blog/gitlab-is-open-core-github-is-closed-source/)) and relies on many open source projects for its development. Jacob had already decided that the [Monaco editor](https://microsoft.github.io/monaco-editor/) was the perfect code editor to integrate. 
It had already proven itself within different contexts, was great for performance, and so could be considered a [boring solution](https://handbook.gitlab.com/handbook/values/#efficiency).\n\nOur UX manager [Sarrah Vesselov](/company/team/#SVesselov) did the initial design for the concept after which it got passed on to me. It was up to our platform product manager [James Ramsay](/company/team/#jamesramsay), our frontend engineering manager [Tim Zallman](/company/team/#tpmtim), senior frontend engineer [Phil Hughes](/company/team/#iamphill), and I as the UX Designer to redefine the prototype \"multi-file editor\" into the foundation capable of supporting our vision of an Integrated Development Environment with live previews and web terminals, that enables anyone to contribute.\n\n## Iterating on user experience\n\n### An integrated editor\n\nThe original \"multi-file editor\" was about committing multiple changes at once because this was annoying when updating the handbook or docs. Often those changes touched multiple files. It was a prototype that made it easier for people to contribute.\n\nThe more we thought about this idea, the greater the possibilities became. One of GitLab's unique advantages is being an integrated product. Building an editor that was integrated with GitLab and made it easier for anyone to contribute is a natural fit. However, the starting point of a prototype in the file list and blob editor wouldn't have been enough to handle this. Decoupling this was the first actionable item.\n\n>One of GitLab's unique advantages is being an integrated product. Building an editor that was integrated with GitLab and made it easier for anyone to contribute is a natural fit.\n\nThis change, which required a lot of discussion and a considerable amount of engineering work by our developers Phil and Tim, was where the project pivoted towards its new direction. 
The Web IDE got a lot more screen real estate as it no longer had to make room for the project sidebar and other page elements. We decided that the Web IDE would edit one branch at a time only and conceptualized the initial Git flow into the editor. Based on existing UI paradigms and inspired by other code editors like [VSCode](https://code.visualstudio.com/) and [Atom](https://atom.io/), we arrived at the well-known, three-pane layout.\n\n\u003Cdiv class=\"compare-images-2\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-iteration-0-concept.png\" class=\"compare-image-top shadow\" alt=\"multi file editor concept\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-iteration-1-concept.png\" class=\"compare-image-bottom shadow\" alt=\"web ide file editor concept\">\n\u003C/div>\n\nEven seasoned developers were once beginners, and getting new people accustomed to the Git workflow continues to be notoriously hard to tackle. We decided therefore that the core of the Web IDE experience should be stable before we can venture into more advanced concepts. We set out to make the \"editing to committing\" experience as good as possible and to create a foundation on which we can expand.\n\nEven while having [these discussions](https://gitlab.com/gitlab-org/gitlab-ce/issues/44316), development never stood still. We quickly had a working version of the Web IDE that relied on the Monaco editor. Our immediate efforts pushed towards getting that to a functional, viable state.\n\n### A review state\n\nDue to the potency of the Monaco editor, it became clear we had many options to choose from as to what to develop next. A review state was high up on that list, as it should be obvious what you are going to commit. Not only that, it introduced the possibility of being able to have an integrated merge request review experience in the context of the editing experience – something that has not been possible before.\n\nThis introduced the problem of managing states. 
After much discussion, we decided to go for editor states instead of file-specific states. Both the user perspective as well as the technical implementation benefited from this as it reduced complexity. It meant you were either editing your files or reviewing your changes across the files you had opened.\n\n![Web IDE edit and review states](https://about.gitlab.com/images/blogimages/webide/web-ide-states.png){: .shadow.medium.center}\n\nAt this point, we are nearing the current state of the Web IDE, though in GitLab 10.8 we could finally [realize the \"editing to committing\" experience](https://gitlab.com/gitlab-org/gitlab-ce/issues/44846) that we talked about before and which was conceptualized and [prototyped](https://framer.cloud/Cojmw/index.html) while developing GitLab 10.7. This was made possible as development reached a more stable state.\n\n### Deciding on hierarchy\n\nThe new experience had several objectives. It needed to introduce a more logical hierarchy for the panes to operate in. Based on that we could decide which panes would potentially show what information and where we could fit in any future more advanced features.\n\nThe second objective was to guide the user more intuitively from editing to committing. The editing and reviewing experience up until then showed its shortcomings as it was hard to switch modes and unclear when you were doing a good job. If even seasoned developers had a hard time using it, how could people just starting out ever hope to successfully contribute making use of it?\nJames and I went through many concepts and discussed both flow and hierarchy before getting into detailed mockups. Through the iterations, it became apparent we preferred our hierarchy to act from left to right. We decided we needed a similar paradigm as the activity bar shown in VSCode. The editor became far more usable as state changes were just one click away, regardless of which state you were already using. 
As committing was now a separate state as well, it brought a linearity to the entire flow as seen from the activity bar.\n\nThe last significant detail, which came out of a discarded design iteration, was a button to guide the user towards committing their changes. It introduced a little section at the bottom of each state with a blue commit button and a counter so you can see how many changes you have made – essential as we repurposed the right sidebar.\n\n\u003Cdiv class=\"compare-images-3\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-1.png\" class=\"compare-image-top shadow\" alt=\"web ide revised concept edit mode\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-2.png\" class=\"compare-image-middle shadow\" alt=\"web ide revised concept review mode\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-3.png\" class=\"compare-image-bottom shadow\" alt=\"web ide revised concept commit mode\">\n\u003C/div>\n\n*Interested to see all iterations the concepts have gone through? Check out my [Web IDE directory](https://gitlab.com/gitlab-org/gitlab-design/tree/master/progress/dimitrie/web-ide) in GitLab's open source design library where we contribute all our design files!*\n\n## Just the beginning\n\nThe current state of the Web IDE is still only the beginning. We are planning for an even better experience in the future: one where we can integrate and support more advanced features, such as a live environment to test your code against and code review discussions which are directly resolvable.\n\nIn GitLab 11.0, shipping next Friday, we will already have the following improvements: you will be able to view the latest pipeline status and the job logs directly in context, and you will be able to quickly switch between both assigned and authored merge requests without leaving the Web IDE!\n\nThis and more will inevitably lead towards more interesting design decisions to be made. 
Some of these concepts are uncharted territory and are sure to be valuable to further speed up development and give developers more confidence. Our hope is that this is a valuable contribution to both the open source community as well as GitLab itself.\n\nDo you have great ideas to push this effort forwards or want to contribute yourself? Check out the [issue tracker](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=web%20ide)!\n",[9,3138,700],{"slug":4377,"featured":6,"template":680},"introducing-gitlab-s-integrated-development-environment","content:en-us:blog:introducing-gitlab-s-integrated-development-environment.yml","Introducing Gitlab S Integrated Development Environment","en-us/blog/introducing-gitlab-s-integrated-development-environment.yml","en-us/blog/introducing-gitlab-s-integrated-development-environment",{"_path":4383,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4384,"content":4390,"config":4396,"_id":4398,"_type":14,"title":4399,"_source":16,"_file":4400,"_stem":4401,"_extension":19},"/en-us/blog/introducing-our-statement-of-support",{"title":4385,"description":4386,"ogTitle":4385,"ogDescription":4386,"noIndex":6,"ogImage":4387,"ogUrl":4388,"ogSiteName":667,"ogType":668,"canonicalUrls":4388,"schema":4389},"Introducing our Statement of Support","Our Statement of Support defines how and what we support in terms of our products, services, and applications. 
Here's an explainer on what you can expect from us.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678875/Blog/Hero%20Images/support-series-cover.png","https://about.gitlab.com/blog/introducing-our-statement-of-support","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing our Statement of Support\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tom Cooney\"}],\n        \"datePublished\": \"2018-12-20\"\n      }",{"title":4385,"description":4386,"authors":4391,"heroImage":4387,"date":4393,"body":4394,"category":299,"tags":4395},[4392],"Tom Cooney","2018-12-20","\n\nRecently, our team released an update to our [support page](/support/), with a new Statement of Support. This is a document defining how and what we support in terms of our products, services, and applications. Many of the policies defined in this document have existed before, and nearly all of it was what we practiced all along. However, we wanted to be clearer on our policies and stricter in practicing them to ensure we're providing the best level of support to our customers and the community.\n\nI want to cover two areas of our Statement of Support that might need a bit more clarification.\n\n## Scope of support\n\nOne of the more important parts of the Statement of Support is defining the scope of support. Scope of support, in the simplest terms, is what we support and what we do not. Ideally, we would support everything. However, without drastically reducing the quality of our support or increasing the price of our products this would be impossible. The \"limitations\" actually help us to create a more consistent and efficient support experience.\n\n### Scope allows us to streamline our expertise\n\nThis means our support teams will be experts in the necessary fields rather than being stretched thin. We don't want to widen our coverage at the cost of depth. 
There are core aspects of our services that we _need_ to nail down, and we can't afford to be shallow in these areas.\n\n### Strict adherence to the scope means more consistent support\n\nTeams that don't follow (or have) a scope of support often enable customers to play \"support roulette\" – a practice where a customer might continually contact support for the same issue in the hopes of finding a representative that is willing and able to extend outside of scope. We don't think this is a good experience for either side. We want there to be clear understanding of what support will do.\n\nSo, what do we support and what do we not?\n\n## Support for paid users\n\nFor our paid users, we have pages describing [the scope of support for GitLab.com (SaaS)](/support/statement-of-support/#gitlabcom) and [the scope of support for GitLab Self-managed Licenses](/support/statement-of-support/#self-managed).\n\nWhat about free users? For our wider community members who don't need the features in our paid plans, I'd like to go a bit more in-depth.\n\n## Community-first support for free users\n\nFor our free users, support options will always be \"community first.\" Official support at GitLab is a paid feature, and we encourage our free users to first use resources such as our [documentation](https://docs.gitlab.com) and [community forums](https://forum.gitlab.com).\n\nWhen you run into bugs or have feature requests, it is best to submit an issue to the appropriate issue trackers. For example, the [Community Edition Issue Tracker](https://gitlab.com/gitlab-org/gitlab-ce/issues) or the [GitLab Runner Issue Tracker](https://gitlab.com/gitlab-org/gitlab-runner/issues) would cover bug reports and feature requests for GitLab and GitLab Runner. You can browse all of the repositories in our open-core offering in the [gitlab-org](https://gitlab.com/gitlab-org) group on GitLab.com.\n\nThis type of community-first approach is standard in open source/core and free-to-use applications. 
Internally, we used the support model of a very popular open source application, [WordPress](https://wordpress.org/support/), and in particular, [WordPress.com (SaaS)](https://en.support.wordpress.com/contact/), when designing our Statement of Support.\n\nIt's a major goal of ours to better foster the community resources. We want the answers to general technical questions about GitLab to be readily available, and we want to build a strong community that helps one another solve issues. We are working with our community team to make the forums more active. It is a _community_ resource, however, and we would appreciate your contributions here as well.\n\nHaving said this, we understand there are certain things that require the attention of our support team, even for our free users. If in doubt, please do submit a ticket, and our team will triage as appropriate. We don't want there to be any discouragement in seeking help from our team. Please understand, however, that priority may go to paid users.\n\nSee [more details on the scope of support for free users](/support/statement-of-support/#free-plan-users).\n\n## Summing up\n\nThe GitLab support team is here to help. Whether you are a free user or an Ultimate customer, we want to ensure you have the best experience possible. We believe that this Statement of Support is an important tool in that process. Everyone at GitLab is a member of the GitLab community and we all play a role in making the entire ecosystem better. 
We look forward to working with you all to make our projects, issue trackers and forums better, more resource-rich places.\n",[9,675],{"slug":4397,"featured":6,"template":680},"introducing-our-statement-of-support","content:en-us:blog:introducing-our-statement-of-support.yml","Introducing Our Statement Of Support","en-us/blog/introducing-our-statement-of-support.yml","en-us/blog/introducing-our-statement-of-support",{"_path":4403,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4404,"content":4410,"config":4416,"_id":4418,"_type":14,"title":4419,"_source":16,"_file":4420,"_stem":4421,"_extension":19},"/en-us/blog/introducing-the-gitlab-ai-transparency-center",{"title":4405,"description":4406,"ogTitle":4405,"ogDescription":4406,"noIndex":6,"ogImage":4407,"ogUrl":4408,"ogSiteName":667,"ogType":668,"canonicalUrls":4408,"schema":4409},"Introducing the GitLab AI Transparency Center","This new initiative will help our community understand how we uphold governance and transparency in our AI products.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098448/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_4YvWyVQu8Q1g31ZVjlDOkH_1750098447812.png","https://about.gitlab.com/blog/introducing-the-gitlab-ai-transparency-center","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the GitLab AI Transparency Center\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robin Schulman\"}],\n        \"datePublished\": \"2024-04-11\"\n      }",{"title":4405,"description":4406,"authors":4411,"heroImage":4407,"date":4413,"body":4414,"category":1839,"tags":4415},[4412],"Robin Schulman","2024-04-11","GitLab is dedicated to responsibly integrating artificial intelligence (AI) throughout our comprehensive DevSecOps platform. 
We offer GitLab Duo, a [full suite of AI capabilities](https://about.gitlab.com/gitlab-duo/) across the GitLab platform, so that our customers can ship better, more secure software faster. GitLab Duo follows a privacy- and transparency-first approach to help customers confidently adopt AI while keeping their valuable assets protected.\n\nGenerative AI is moving so quickly and we know it presents a host of novel questions about the privacy and safety of this technology. In GitLab's [2023 State of AI in Software Development report](https://about.gitlab.com/developer-survey/#ai), more than 75% of respondents expressed concern about AI tools having access to private information or intellectual property. \n\n[Transparency is a core value at GitLab](https://handbook.gitlab.com/handbook/values/#transparency), and we take a transparency- and privacy-first approach to building our AI features to help ensure that our customers’ valuable intellectual property is protected. Accordingly, we’ve launched our [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) to help GitLab’s customers, community, and team members better understand the ways in which GitLab upholds ethics and transparency in our AI-powered features.\n\nThe AI Transparency Center includes GitLab’s [AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/), [AI Continuity Plan](https://handbook.gitlab.com/handbook/product/ai/continuity-plan/), and our [AI features documentation](https://docs.gitlab.com/ee/user/ai_features.html).\n\n## The AI Ethics Principles for Product Development explained\n\nWe believe ethics play an important role in building AI features. 
For this reason, we’ve launched GitLab’s [AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/) to address what we consider to be the best practices in responsible AI development. These Principles will help guide GitLab as we continue to build and evolve our AI functionality.\n\nThe Principles specifically address five key areas of concern that GitLab monitors so that we can continue to responsibly integrate AI into our customers’ workflows:\n\n- **Avoiding unfair bias.** [Diversity, Inclusion, and Belonging](https://about.gitlab.com/company/culture/inclusion/) is also one of GitLab’s core values. It is a critical consideration when building features powered by AI systems, as there is [evidence](https://fra.europa.eu/en/publication/2022/bias-algorithm) that AI systems may perpetuate human and societal biases. GitLab will continue to prioritize Diversity, Inclusion, and Belonging when building AI features.\n\n- **Safeguarding against security risks.** GitLab is a DevSecOps platform, which means we integrate security throughout our entire product, including in our AI features. While AI brings many potential security benefits, it can also create security risks if not deployed correctly. As we do with all of our features, our goal is to mitigate these security risks in GitLab’s AI features.\n\n- **Preventing potentially harmful uses.** We strive to build AI features responsibly. 
We try to carefully consider the potential consequences of our AI features in order to refrain from launching features that are likely to cause, or allow others to cause, overall harm.\n\n- **Considering what data our AI features use and how they use it.** We will continue to carefully evaluate the data that our AI features use, the purposes for which we’re using this data, and who owns the intellectual property and other rights to the data, just as we do with all of GitLab’s features.\n\n- **Holding ourselves accountable.** GitLab’s mission is to make it so that [everyone can contribute](https://about.gitlab.com/company/mission/), and we welcome feedback from the GitLab community about our AI features. We will in turn aim to share our AI ethics-related findings with others in the industry where possible. We also know that AI systems, and the risk mitigations we need to employ with them, will change over time, so we are committed to continuously reviewing and iterating on our AI features and these Principles.\n\n## The AI Continuity Plan explained\n\nUnlike other DevSecOps platforms, GitLab is not tied to a single AI model provider. 
Instead, our AI features are powered by a diverse set of models, which helps us support a wide range of use cases and gives our customers flexibility.\n\nWe carefully select our third-party AI vendors to ensure a commitment from the vendor that they will forgo the use of GitLab and GitLab customers’ content for the developing, training, and fine tuning of vendor models.\n\nOur new [AI Continuity Plan](https://handbook.gitlab.com/handbook/product/ai/continuity-plan/) lays out GitLab’s processes when reviewing and selecting new third-party AI vendors, and when these AI vendors materially change their practices with respect to customer data.\n\n## AI features documentation \n\nIn keeping with GitLab’s core Transparency value, our [AI features documentation](https://docs.gitlab.com/ee/user/ai_features.html) clearly outlines our AI features’ purposes, underlying models, statuses, and privacy practices.\n\n## Visit the AI Transparency Center\n\nThe [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) is publicly available in keeping with our [Transparency value](https://handbook.gitlab.com/handbook/values/#transparency) and to encourage others in the AI industry and the GitLab community to take safety, privacy, and ethics into account when building their own AI-powered functionality.\n\nWe’re excited about the opportunities that responsible AI will bring, and will continue to build our AI features with ethics, privacy, and transparency in mind.\n",[1299,9,675],{"slug":4417,"featured":6,"template":680},"introducing-the-gitlab-ai-transparency-center","content:en-us:blog:introducing-the-gitlab-ai-transparency-center.yml","Introducing The Gitlab Ai Transparency 
Center","en-us/blog/introducing-the-gitlab-ai-transparency-center.yml","en-us/blog/introducing-the-gitlab-ai-transparency-center",{"_path":4423,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4424,"content":4429,"config":4435,"_id":4437,"_type":14,"title":4438,"_source":16,"_file":4439,"_stem":4440,"_extension":19},"/en-us/blog/iterating-on-sso",{"title":4425,"description":4426,"ogTitle":4425,"ogDescription":4426,"noIndex":6,"ogImage":2010,"ogUrl":4427,"ogSiteName":667,"ogType":668,"canonicalUrls":4427,"schema":4428},"How we are iterating on Group Single Sign On for GitLab.com","Here's some insight into our approach to improving a key enterprise capability for GitLab.com, SSO.","https://about.gitlab.com/blog/iterating-on-sso","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we are iterating on Group Single Sign On for GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Brinkman\"}],\n        \"datePublished\": \"2019-01-17\"\n      }",{"title":4425,"description":4426,"authors":4430,"heroImage":2010,"date":4432,"body":4433,"category":299,"tags":4434},[4431],"Eric Brinkman","2019-01-17","\n\nAt GitLab, we do things a little differently. We believe in shipping what we call the MVC, or minimum viable change, rather than waiting for something to be perfect. We’d rather our customers get their hands on a portion of the feature to ensure we are on the right track and that our next iteration is spot on, than wait several months to ship a full feature that may not be exactly what customers desire. In fact, [iteration is one of our six core values](https://handbook.gitlab.com/handbook/values/#iteration) at GitLab, and it’s something that drives our day-to-day decision making. 
In this blog post, we’ll take a look at how a recent enterprise authentication feature challenged our organization with respect to prioritization, core values, and [transparency](https://handbook.gitlab.com/handbook/values/#transparency) with customers. We’ll also discuss our vision for GitLab.com, and the associated challenges we’ve come across while ensuring it’s a solution that works seamlessly for enterprise adoption of GitLab.\n\nSingle Sign On, or SSO, has been at the forefront of most enterprises’ digital transformation requirements for quite some time. Enterprise organizations require access to software to be controlled by their Identity Provider of choice as there are hundreds, if not thousands, of users. Manually provisioning users and revoking access across multiple systems when employees leave is not scalable and is error prone in organizations of any size.\n\nWe’ve long had [support for SAML, LDAP, and OAuth configuration](https://docs.gitlab.com/ee/administration/auth/) for self-managed GitLab, which assumes our customers have admin access at the instance level. While this works great for individual instances, a different approach is needed for GitLab.com, which is a giant, multi-tenant version of a single instance, primarily segregated at the group level for enterprises.\n\nIn [GitLab 11.0](/releases/2018/06/22/gitlab-11-0-released/#saml-single-sign-on-for-groups-beta), shipped in June 2018, we launched the [MVC to take the first step in SAML-based SSO on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/40). 
When we launched this functionality, we knew it wasn’t going to solve 100 percent of enterprise authentication needs, but rather than keeping this functionality private until we had other SSO features (such as automated provisioning of users and revocation of permissions), we decided to launch it to get as much feedback as possible, and to ensure our product velocity stays at the high levels we’ve come to expect.\n\nHere are some of the factors at play and how we're moving forward:\n\n### 1. We haven't always focused on enterprise features for GitLab.com\n\nGitLab.com has typically been the GitLab solution for hobbyists and small development teams. Enterprises have typically gravitated towards self-managed, self-hosted GitLab. Because of this bifurcation, enterprise features such as SSO were not prioritized as high in mid-2018.\n\n### The fix: We are now prioritizing enterprise features\n\nThis includes features like SSO at the top of our list. In 2019, enterprise customers looking to use GitLab are coming with a SaaS-first approach, led by a desire to get out of traditional hosting arrangements, shying away from long procure times, and looking for quick time to market on SaaS implementation. Most importantly, we’ve heard this directly from enough customers recently that we couldn't sit idly by and not activate on this.\n\n### 2. Security issues have burdened our Manage team\n\nThe [Manage](/stages-devops-lifecycle/) team, responsible for authentication at GitLab, has been hit with the most security issues of any team (170 open issues) and has been required to prioritize these over new feature releases. Manage has released [eight security fixes](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=closed&milestone_title=Any&label_name[]=Manage&label_name[]=security) that we've made public since September. 
We're proud of this work, as it’s required to protect our customers.\n\n### The fix: Measures to improve our velocity in finding and fixing security issues\n\nWe will continue to prioritize P1 security issues above all new features and functionality, consistent with our [prioritization framework](/handbook/product/product-processes/#how-we-prioritize-work) and ensuring a secure application. If GitLab isn’t a secure application where customers can trust that their data is safe and secure, all of the features in the world won’t make a difference as we won’t be around for long. In order to improve our security posture and increase the velocity at which we identify and fix security vulnerabilities, we've launched our [HackerOne Bug Bounty Program](https://hackerone.com/gitlab) with rewards of up to $12,000! [This program was launched](/blog/gitlab-hackerone-bug-bounty-program-is-public-today/) on Dec. 12, 2018 and has already paid out over $265,000 in bug bounties, over 215 reports!\n\n### 3. The Manage team has been stretched\n\nThe Manage team has an incredibly broad scope, ranging from permissions and authentication, to cycle analytics and DevOps scoring for organizations. In the few spare cycles our engineering team has had in between security issues, we had to spend time on high-severity, non-security bugfixes and promised features – like [adding smart card support](https://gitlab.com/gitlab-org/gitlab-ee/issues/726) and keeping instances more secure by [prohibiting admin impersonation](https://gitlab.com/gitlab-org/gitlab-ce/issues/40385). Simply put, we didn’t have enough resources to activate on all fronts.\n\n### The fix: We're growing to meet demand\n\nGitLab will grow from ~400 employees at the start of 2019 to ~800 by the end of the year. 
We will be splitting Manage into several teams, starting with the [Fulfillment team](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/18087), allowing for more resources to push along each of these areas in parallel.\n\nGitLab.com is one of our highest-growth areas based on most Key Performance Indicators, including monthly active users, revenue, and feature usage. It’s the quickest way to get started using GitLab, and we need to do a better job knocking down barriers for large organization adoption. We’re already activating heavily on [SAML-based SSO for enterprises](https://gitlab.com/groups/gitlab-org/-/epics/731) in early 2019 and look forward to regaining our customers’ trust in being a company that quickly adapts to your feedback.\n\nIf this type of organization and [product philosophy](/handbook/product/) seems exciting to you, drop me a note at ebrinkman@gitlab.com. We will be doubling the size of the product team and are looking for talented product managers to help us scale GitLab and drive the direction and growth of our application.\n",[677,9,720],{"slug":4436,"featured":6,"template":680},"iterating-on-sso","content:en-us:blog:iterating-on-sso.yml","Iterating On Sso","en-us/blog/iterating-on-sso.yml","en-us/blog/iterating-on-sso",{"_path":4442,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4443,"content":4449,"config":4455,"_id":4457,"_type":14,"title":4458,"_source":16,"_file":4459,"_stem":4460,"_extension":19},"/en-us/blog/iteration-and-code-review",{"title":4444,"description":4445,"ogTitle":4444,"ogDescription":4445,"noIndex":6,"ogImage":4446,"ogUrl":4447,"ogSiteName":667,"ogType":668,"canonicalUrls":4447,"schema":4448},"Why small merge requests are key to a great review","Massive merge requests lead to more problems than solutions. 
We explain how embracing iteration can lead to a better experience for the code author and code review.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681966/Blog/Hero%20Images/broken_wood.jpg","https://about.gitlab.com/blog/iteration-and-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why small merge requests are key to a great review\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2021-03-18\"\n      }",{"title":4444,"description":4445,"authors":4450,"heroImage":4446,"date":4451,"body":4452,"category":743,"tags":4453},[3556],"2021-03-18","\n\nThis post is adapted from a [GitLab Unfiltered blog post](/blog/better-code-reviews/) written by me, [David O'Regan](/company/team/#oregand). In [part one of our series](/blog/tips-for-better-code-review/), we explain the importance of fairness and empathetic thinking in code reviews and in [part two we explain why patch files bring added value to code reviews](/blog/patch-files-for-code-review/).\n{: .note .alert-info .text-center}\n\nThe [GitLab handbook defines iteration as doing the smallest thing possible to get it out as quickly as possible](https://handbook.gitlab.com/handbook/values/#iteration). If there was a single guiding principle I could suggest you lean into with your merge requests it would be iteration. At its heart, software is all about iteration. Software is about taking a large problem and breaking it down into smaller, more manageable problems. Like any other skill, iteration needs to be learned and practiced often to improve. The next time you're hitting the \"Submit merge request\" button, pause a moment and think if the merge you're about to submit could be downsized.\n\n## Why smaller MRs are better\n\nThe only thing worse than writing a long merge request is reviewing a long merge request. 
This is why at GitLab, iteration (and by extension, [small merge requests](https://handbook.gitlab.com/handbook/values/#make-small-merge-requests)) is one of our driving values.\n\nWe even created a [DangerBot](https://docs.gitlab.com/ee/development/dangerbot.html) that will ask code authors to break down merge requests that are over a certain size.\n\nMassive merge requests can create technical problems for a code reviewer beyond added complexity. If a review goes beyond a certain number of lines, it simply becomes too difficult to reason through without checking out the branch, booting the project, and [smoke testing](https://en.wikipedia.org/wiki/Smoke_testing_(software)). While smoke testing complex reviews is a great idea, this process shouldn't become a habit for reviewing code. Big MRs can lead to merge conflicts, content rot, and other disasters.\n\n[Sarah Yasonik](/company/team/#syasonik), backend engineer on Monitor at GitLab, suggested that reviewers handle too-large or too-complicated merge requests by creating new, smaller MRs while reviewing, and reviewing the code in chunks. It's better to break up a too-big MR than to continue adding lines of code to an MR that is already too large.\n\n### The art of the follow-up\n\nAs the code author and code reviewer, there are a few best practices to abide by. Namely, if you are a code author and you offer a follow up review, be sure you always follow through on this promise.\n\nIf you are a code reviewer, here are four tips:\n\n*   Feel empowered to ask the code author for a follow up\n*   Accept any offers of a follow up graciously\n*   Be patient with code authors\n*   Know when it's best to reject a follow up offer\n\n## Practical tips for using iteration in code reviews\n\n### Why does iteration matter?\n\nThe smaller the merge request, the easier it is for the code reviewer to check. 
The idea of shipping small changes is consistent with GitLab's [iteration value](https://handbook.gitlab.com/handbook/values/#iteration). Clement Ho, my frontend engineering manager who has since left GitLab, was a major champion for iteration. Once I started paying close attention to how Clement broke down merge requests into small bites, I started to notice the benefits of iteration almost immediately. Iteration is so important to GitLab that CEO [Sid Sijbrandij](/company/team/#sytses) hosts [weekly office hours devoted to breaking down big projects](/handbook/ceo/#iteration-office-hours), and grades our team members on their [iteration competency](https://handbook.gitlab.com/handbook/values/#iteration-competency).\n\n### How small merge requests helps your reviewer\n\nIf iteration is all about releasing the [minimal viable change (MVC)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) in small merge requests, then it follows that engineers who fully embrace iteration will be shipping less code per merge request, to the delight of their reviewer.\n\nWe've all been there. We are assigned as a reviewer on an MR, and just as you're about to get comfortable you open the MR to see more than 1000 lines of code across multiple files. Time to refill your mug of coffee and get ready for a tiring review process.\n\nThe problems with large MRs should be obvious [if you've ever practiced self-reviews](/blog/tips-for-better-code-review/) or found yourself in this situation. Here are a few reasons why large MRs are indicative of bigger problems:\n\n*   Longer MRs have more lines of code\n*   There is the greater chance for brittle connections\n*   It becomes harder to follow the path of the solution/feature\n*   Screenshots usually cannot account for the volume of change\n*   It's much easier to miss bugs\n*   The author is sure to be left with lots of comments, which can be demoralizing\n\nIt's a simple concept, but one that is undervalued. 
Keep your merge requests small because:\n\n*   There are less lines of code to read\n*   Different contexts are separated into individual MRs\n*   The reviewer can follow along more easily\n*   It's easier to follow the path of a feature's development\n*   Less reviewer comments per MR is better for motivating the code author\n\nIn the end, we review code carefully at GitLab because we want to ensure that every release brings new value to our customers. If you have questions or comments about code reviews, creating smaller MRs, or iteration, leave us a comment on this blog post!\n\nGet more code review tips by reading the other blog posts in our series. In part one, we discuss [the role of fairness in code review](/blog/tips-for-better-code-review/) and in part two we share some [practical advice on using patch files](/blog/patch-files-for-code-review/).\n\n_Sara Kassabian contributed to this blog post._\n\nCover image by [Jon Sailer](https://unsplash.com/@eyefish73) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[4454,9],"code review",{"slug":4456,"featured":6,"template":680},"iteration-and-code-review","content:en-us:blog:iteration-and-code-review.yml","Iteration And Code Review","en-us/blog/iteration-and-code-review.yml","en-us/blog/iteration-and-code-review",{"_path":4462,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4463,"content":4469,"config":4475,"_id":4477,"_type":14,"title":4478,"_source":16,"_file":4479,"_stem":4480,"_extension":19},"/en-us/blog/just-commit-launch",{"title":4464,"description":4465,"ogTitle":4464,"ogDescription":4465,"noIndex":6,"ogImage":4466,"ogUrl":4467,"ogSiteName":667,"ogType":668,"canonicalUrls":4467,"schema":4468},"Let’s talk about commitment","What possibilities could you unlock by just making the choice, committing, and moving forward?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671258/Blog/Hero%20Images/just-commit-blog-cover.png","https://about.gitlab.com/blog/just-commit-launch","\n               
         {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Let’s talk about commitment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Todd Barr\"}],\n        \"datePublished\": \"2019-02-18\",\n      }",{"title":4464,"description":4465,"authors":4470,"heroImage":4466,"date":4472,"body":4473,"category":787,"tags":4474},[4471],"Todd Barr","2019-02-18","\n\nWe’re now solidly into 2019. Commitments you made to yourself, your health, your productivity, your career, your budget, or whatever the case may be – they’re probably becoming harder to keep. This pattern of making resolutions, being on our best behavior for a while, falling off the wagon, returning to our ways, then starting the whole process over in the new year is all too familiar.\n\nWith [50 percent of digital transformation efforts stalled in 2018](https://mktg.forrester.com/predictions-2019), you’ve likely experienced your own version of this at work, and are probably even somewhere in that cycle right now.\n\nThe thing is, commitment unlocks new potential. You often don’t get to the good stuff until you make that commitment – whether it’s committing to months of training and discipline, then experiencing the euphoria of completing your first marathon, or committing to your partner and building a life together.\n\nIn the software space, making that commitment can be the difference between paying lip service to DevOps transformation and actually realizing its promises. Making big changes, especially at an organizational level, is daunting. The trick is to commit to the process, not just to the goal. 
[Focusing on the processes and behaviors that support the goal is key to success](https://www.scienceofpeople.com/goal-setting/), so having a clear plan of attack rather than an abstract objective to achieve is what makes all the difference.\n\nHere at GitLab, we committed to being [all-remote](/company/culture/all-remote/) – allowing us to hire the best people, no matter where in the world they might be or at what times they choose to work. We went all in on [asynchronous communication](/handbook/communication/#internal-communication), conscientiously documenting everything so we could collaborate across time zones and borders. We committed to a monthly release cycle, a decision which has seen us ship, to date, 88 consecutive new releases, allowing us to work with a short feedback loop and make small adjustments and iterations along the way. It was our commitment to the process, to having a single vision and steadily marching toward it, that enabled us to build a single application for the entire DevOps lifecycle with an all-remote team.\n\nSo this is what we’re asking you to do! Just commit. To software modernization. To faster cycle times. To secure apps. And because commitment is easier when you have a plan, and accountability, we’re here to support you on the journey. Over the coming weeks, we will be rolling out a series of blog posts and guides to help you make meaningful, lasting change in your organization. From tips and success stories on how to modernize your application architecture, to finally getting on top of technical debt, and building more secure applications, we’re working with our experts, customers, and community to help you along the way.\n\nObviously, commit has a double meaning for us. Git unlocked a whole new way to collaborate on software with the humble commit. Now, at GitLab, committing unlocks a whole lot more value – faster time to market, more secure code, more modern applications. We’re asking you to just commit to these. 
[Are you up for the challenge?](/blog/application-modernization-best-practices/)\n\n## #JustCommit\nSo, you're committing to starting something new this year. Hooray! 🎉 It's always easier to stick to something with a buddy – tell us your commitments by tweeting us [@gitlab](https://twitter.com/gitlab) using #JustCommit, and we'll do our best to help (and enter you into our swag giveaway)! The [giveaway](/community/sweepstakes/) lasts through April, but we want to keep you committing all year long.\n",[1440,1297,9],{"slug":4476,"featured":6,"template":680},"just-commit-launch","content:en-us:blog:just-commit-launch.yml","Just Commit Launch","en-us/blog/just-commit-launch.yml","en-us/blog/just-commit-launch",{"_path":4482,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4483,"content":4489,"config":4494,"_id":4496,"_type":14,"title":4497,"_source":16,"_file":4498,"_stem":4499,"_extension":19},"/en-us/blog/keeping-your-account-safe",{"title":4484,"description":4485,"ogTitle":4484,"ogDescription":4485,"noIndex":6,"ogImage":4486,"ogUrl":4487,"ogSiteName":667,"ogType":668,"canonicalUrls":4487,"schema":4488},"How to keep your GitLab account safe (and accessible)","Some practical tips from the GitLab.com Support Team to make sure you can get into your account when (not if!) 
disaster strikes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666806/Blog/Hero%20Images/keep-gitlab-account-safe.jpg","https://about.gitlab.com/blog/keeping-your-account-safe","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to keep your GitLab account safe (and accessible)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lyle Kozloff\"}],\n        \"datePublished\": \"2018-08-09\",\n      }",{"title":4484,"description":4485,"authors":4490,"heroImage":4486,"date":4491,"body":4492,"category":299,"tags":4493},[1975],"2018-08-09","\nThe GitLab.com Support Team has seen a lot of unfortunate situations where people have lost access to their accounts for _very_ preventable reasons. Here are some quick tips to keep your account secure and to ensure you're always able to log in.\n\n## 1. Use 2FA (and print your recovery codes)\n\nMore and more people are [setting up two-factor authentication (2FA)](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html) to keep their\naccount secure, but things do happen to that second factor. Phones get lost or stolen, and suddenly you’re locked out of your account.\nPlease **do** set up 2FA, but also make sure you keep a backup of your recovery codes.\n\nIf you've got access to a printer: print them!\n\nMaybe even better? Store a copy of your recovery codes in a password manager so you won’t lose them.\n\n## 2. Set up an SSH key (so that you can generate new recovery codes)\n\nDid you know that you can [generate recovery codes with an SSH key](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html#generate-new-recovery-codes-using-ssh)? If you somehow lost your recovery codes, you can generate new ones by simply\nrunning `ssh git@gitlab.com 2fa_recovery_codes`. 
So, even if you don’t use SSH much, it might pay off to have a [key added to your GitLab account](https://docs.gitlab.com/ee/user/ssh.html).\n\n## 3. Add a backup email on your account\n\nMany of our users have vanity or company domains on their accounts. But, what happens if you leave\nthe company or forget to pay your domain registration? Suddenly you’re unable to receive password\nreset emails and are writing into Support from an unknown email address (it's difficult for the Support\nTeam to verify your identity if you contact us from a different email address).\n\nGo ahead and use a custom domain, but consider [having a backup email address](https://docs.gitlab.com/ee/user/profile/#profile-settings)\nfrom a well-known public provider.\n\n## 4. Use your real name on your account profile\n\nWhen we’re evaluating whether or not to restore an account or remove 2FA, it makes the call harder when the name on the\naccount is L33T H4X0R (unless, of course, that’s what is on your passport). We understand if you prefer not to include\nyour real name for privacy reasons, but do know that we may be unable to help you recover your accounts if all\nof the above have not been implemented. If your real name isn't an option, consider [adding other online identities to your profile](https://docs.gitlab.com/ee/user/profile/#profile-settings).\n\nHopefully these tips will help you secure and access your GitLab.com accounts. For the security and privacy of our users,\nthe Support Team is required to be very stringent when it comes to helping you recover your accounts. 
We hope that this\narticle will help you stay in control at all times!\n\nPhoto by [Jon Moore](https://unsplash.com/photos/bBavss4ZQcA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/security?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,720],{"slug":4495,"featured":6,"template":680},"keeping-your-account-safe","content:en-us:blog:keeping-your-account-safe.yml","Keeping Your Account Safe","en-us/blog/keeping-your-account-safe.yml","en-us/blog/keeping-your-account-safe",{"_path":4501,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4502,"content":4508,"config":4514,"_id":4516,"_type":14,"title":4517,"_source":16,"_file":4518,"_stem":4519,"_extension":19},"/en-us/blog/keys-to-success-for-product-operations",{"title":4503,"description":4504,"ogTitle":4503,"ogDescription":4504,"noIndex":6,"ogImage":4505,"ogUrl":4506,"ogSiteName":667,"ogType":668,"canonicalUrls":4506,"schema":4507},"3 keys to success for product operations","Learn how to set a foundation for product operations at your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682313/Blog/Hero%20Images/prodops-keys-elena-mozhvilo-Lp9uH9s9fss-unsplash.jpg","https://about.gitlab.com/blog/keys-to-success-for-product-operations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 keys to success for product operations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Farnoosh Seifoddini\"}],\n        \"datePublished\": \"2022-05-24\",\n      }",{"title":4503,"description":4504,"authors":4509,"heroImage":4505,"date":4511,"body":4512,"category":808,"tags":4513},[4510],"Farnoosh Seifoddini","2022-05-24","\n\nIt is official. Product operations is a thing. 
A quick Google search will pull up a long list of articles singing the praises of everything product operations has to offer, from making product managers more efficient to data collection and synthesis. \n\nWhen I first took on [product operations at GitLab](/direction/product-operations/), there wasn’t a lot of definition or guidance on the topic. I understood what product operations meant because I’d been “doing it” as an inseparable part of my product management and product leadership roles for some years. But I’d never had the opportunity to focus solely on product operations.\n\nAs excited as I was, I was also nervous. GitLab was [accelerating toward an IPO](/blog/gitlab-inc-takes-the-devops-platform-public/) and both the product management team and the product were in hyper growth mode. And, to boot, the all-remote, cross-functional teams were in motion, sync and async, day and night, all around the globe. So, I reached out to peers who had already started their product operations journey and leveraged the perspective, progress, and learnings they generously shared. And, in doing so, I realized everyone was doing it a bit differently. \n\nNow, two and a half years later, product operations is a thing at GitLab. And the most common question I get from peers reaching out to me is: How can I set up product operations for success at my organization? \n\nTo answer this question, I will assume we all want to be product-led and customer-centered, and “success” would be product operations helping us get there. I’ll also assume we agree with the sentiment that’s evolved [defining product operations responsibility](https://www.pendo.io/glossary/product-operations/) to fall into these core areas: tools, data, experimentation, strategy, and trusted advisor. \n\nWhile there is no one formula, I will share three keys that opened doors for product operations to make an impact and grow with GitLab.\n\n### 1. 
Empower product operations as its own function, with an equal seat alongside other value-driving functions\n\nAt GitLab, we run product operations as an independent function under the product umbrella. The direct line of responsibility to the head of all product ensures product operations has awareness, alignment, and accountability to the macro needs of the product and the business. This also allows product operations to maintain a broad and unbiased view, as well as the right level of influence, to develop strategies/tactics serving the product and the business without favor toward any particular group. This [Silicon Valley Product Group article](https://www.svpg.com/product-ops-overview/) by Marty Cagan provides more helpful context on the why of this approach. \n\n### 2. Make product operations a people-first operation\n\nBefore product operations can deliver on efficiencies and tools that are useful for the product and the business, product operations must understand all of its internal customers. The first year product operations took shape at GitLab, much of my energy was focused on building relationships, not only with product team members but across the whole organization. Becoming a trusted advisor runs deeper than just delivering data, it’s about sensing pain and building bridges. A product operations team that leads with empathy will elevate the organization rather than just serve the organization. \n\n### 3. Drive adoption of product operations strategies by providing opportunities for team ownership\n\nAt GitLab, [everyone can contribute](/company/mission/#everyone-can-contribute). Leveraging this mindset for product operations led to [more impactful and better-designed iterations](https://handbook.gitlab.com/handbook/values/#iteration) to the problems we were trying to solve. 
By collaborating with various team members across the organization to improve and implement the shared frameworks in the product system, we not only ensure better multi-dimensional solutions but also boost alignment and acceptance of the solutions as well. This approach also inspires team ownership of flexible workflows rather than a perception that product operations is the “enforcer” of rigid processes. \n\nThese three keys become more challenging to forge if they aren’t introduced to an organization early on. Even if not immediately feasible, it’s helpful to carve space for the philosophy upfront and start small to demonstrate the value of the approach as you build the foundation for product operations. In future posts, I will share strategies and tactics for each of these keys as well as answer the second most common question I get: What is a “product system”? \n\nIn the meantime, feel free to learn more about [what product operations drives](/direction/product-operations/) at GitLab and the product management resources we maintain in our [Product Handbook](/handbook/product/).\n\n\n\nCover image by [Elena Mozhvilo](https://unsplash.com/@miracleday?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n",[811,9,723,832],{"slug":4515,"featured":6,"template":680},"keys-to-success-for-product-operations","content:en-us:blog:keys-to-success-for-product-operations.yml","Keys To Success For Product 
Operations","en-us/blog/keys-to-success-for-product-operations.yml","en-us/blog/keys-to-success-for-product-operations",{"_path":4521,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4522,"content":4528,"config":4533,"_id":4535,"_type":14,"title":4536,"_source":16,"_file":4537,"_stem":4538,"_extension":19},"/en-us/blog/khosla-ventures-gitlab-meeting",{"title":4523,"description":4524,"ogTitle":4523,"ogDescription":4524,"noIndex":6,"ogImage":4525,"ogUrl":4526,"ogSiteName":667,"ogType":668,"canonicalUrls":4526,"schema":4527},"Acquisitions, growth curves, and IPO strategies: A day at Khosla Ventures","A CEO Shadow’s take on GitLab’s annual investor meeting with Khosla Ventures.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671620/Blog/Hero%20Images/khosla-ventures-meeting.jpg","https://about.gitlab.com/blog/khosla-ventures-gitlab-meeting","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Acquisitions, growth curves, and IPO strategies: A day at Khosla Ventures\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2019-04-08\",\n      }",{"title":4523,"description":4524,"authors":4529,"heroImage":4525,"date":4530,"body":4531,"category":299,"tags":4532},[3035],"2019-04-08","\n\nWhen I accepted the opportunity to participate in GitLab’s [CEO Shadow program](/handbook/ceo/shadow/), I knew exactly what to expect. In typical GitLab fashion, there was already a handbook page detailing the goal, the format, and the expectations of the program. Our co-founder and CEO, [Sid Sijbrandij](https://twitter.com/sytses), keeps his calendar [public by default](https://handbook.gitlab.com/handbook/values/#public-by-default) to GitLab team-members, so I was able to get a good understanding of the meetings I’d be attending, who’d be present, and what would be discussed. 
What I couldn’t have predicted is how an annual meeting between Sid and venture capitalist [Vinod Khosla](https://en.wikipedia.org/wiki/Vinod_Khosla) would turn out. \n\n## Skeptical about in-person meetings \n\nGitLab is an [all-remote](/company/culture/all-remote/) company. We don’t have any offices, and we communicate and collaborate via Zoom, Slack, Google docs, and GitLab. To me, this is normal, and, as an introvert, my biggest concern with participating in the CEO program was the energy drain I knew I would experience living in downtown San Francisco for three weeks and meeting with people in person. Luckily, even our CEO conducts most of his business remotely. \n\nHowever, many of our investors and board members are still, what I call, remote shy, and tend to default to in-person meetings. This is how I found myself traveling an hour and a half south to Menlo Park to meet with one of our investor groups, Khosla Ventures, for their annual meeting with Sid. Khosla Ventures (KV) is a venture capitalist firm founded by Vinod Khosla, co-founder of Sun Microsystems. KV and GitLab have a long history: They invested in our seed round, led our Series A, and has been a part of every fundraising round since. You could say Khosla Ventures is a big fan of GitLab, and the feeling is mutual. \n\n“I’m not sure what we’re going to get out of this. Khosla is the only investor that can get me to travel an hour and a half to meet in person, every year, without an agenda,” Sid told me. \n\nWhile I sincerely appreciate Sid’s dedication to all-remote, to [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency), and to keeping things simple, I found his sentiment surprising. To me, this was a big deal. We were going to spend the day with some of the industry’s brightest minds in Silicon Valley. However, I equally appreciated his emphasis on the lack of agenda. 
At GitLab, [we work asynchronously](/handbook/communication/) and agendas allow people to prepare ahead of time so meetings can be interactive, discussion-oriented, and productive. A meeting without an agenda is unpredictable and can be a waste of time. \n\n## Khosla Ventures \n\nPulling up to Khosla Ventures in Menlo Park was a refreshing change of scenery from downtown San Francisco where I’d been staying. The mid-century modern style building is tucked behind lush greenery, and as soon as you enter you are unexpectedly greeted by quirky grape purple walls. Inside Khosla Ventures feels more like a creative incubator than an investor firm. \n\nWe first met with Bruce Armstrong, operating partner at Khosla Ventures and a GitLab board member. Top of mind for everyone was the board. With our plans to go public, we needed to hire new board members with public company experience. Earlier that week, I had the opportunity to sit in on a few conversations with potential board members and now I was getting the chance to hear how they were evaluating the candidates. Across the candidates, a pattern was emerging: They all had experience firing CEOs. For some CEOs this would be worrisome. For Sid, it appeared to be a point of pride: He wanted to hire the best people that were going to make the best decisions for the company in the long run. Period. \n\n## The brainstorm \n\nNext we met with Vinod, Bruce, and investment partners Brian Byun and Sven Strohband for a company brief and brainstorm session. Sid began with an update on the financials, and detailed our massive growth and expansion both in terms of people and product. There were no slides presented. Instead, Sid used our website as a visual aid. Nearly every question or discussion point was first addressed with a Google search to pull up the appropriate GitLab web page to reference. 
When discussing the progress of the product, he defaulted to our [homepage](https://about.gitlab.com/), where our product team has meticulously detailed our current and future feature set. During a discussion about competitors and potential partners, our [DevOps tools page](/competition/) was referenced for a single page view of all our competitors in context of exactly how we compete. \n\nI’ve been working at GitLab for two and a half years as a content writer on our marketing team and at times have been extremely frustrated with our marketing website—the content on it, how it’s organized, what we’re presenting, etc. It doesn’t look or operate in a way I’m familiar with so my instinct was to not trust it. But nothing we do at GitLab is “normal,” and witnessing our CEO use the website as a single source of public truth to inform our investors is just one example of what it means to be a [transparent company](https://handbook.gitlab.com/handbook/values/#transparency). We don’t hide behind “marketing speak” on our public facing website or develop behind closed walls. We tell the same story and share the same information with the company, the customers, the community, and yes, even the investors. \n\nWhile our website doesn’t have the same flashy graphics and pithy marketing copy I’m used to, it speaks the truth even when the truth makes people a little nervous about how we’re going to pull this off. We have an incredibly ambitious product roadmap to be built by an all remote team in a short amount of time.\n\n### Acquisition strategy\n\nSomething I’ve found surprising throughout my entire CEO Shadow experience is how external people underestimate GitLab’s ability to deliver on ambitious plans. The conversation often defaults to, “I see what you’re trying to do, but realistically, which categories are you really able to compete in?” And, unfalteringly Sid answers: all of them. 
There are some awkward laughs, and the question is reframed to “What part of the product are most of your customers using today?” We move on.\n\nThe conversation with Vinod Khosla was similar but different in tone. Vinod and the rest of the team were skeptical of our ambition but perhaps more attuned to Sid’s commitment to the direction and vision and thus more willing to dig into how we get there instead of why we won’t. \nPotential partnerships to help fill some of our missing functionality were discussed, but it was apparent that our plan was quickly encroaching into competitive territory among the leading contenders. \n\nInstead, there’s an acquisition strategy. To achieve our goal to deliver a single application for the entire DevOps lifecycle that is best-of-breed in every category, we are going to need some help and make some acquisitions. We already acquired [Gitter](/blog/gitter-acquisition/) and Gemnasium in order to enter into the ChatOps and security space more quickly than if we tried to build it all from scratch.\n\nNaturally, our acquisition strategy and offer was already [drafted and public in our handbook](/handbook/acquisitions/). This enabled the conversation to focus on thinking through potential companies and specific areas of our product where we may want to augment the productivity of our soon-to-be 500 internal developers with an acquisition. \n\n### The IPO date  \n\nGitLab [plans to go public on November 18, 2020](/company/strategy/#sequence) and prefers to remain an independent company with no plans of being acquired. While Vinod made it clear it’s strange to pick and make public an IPO date, at GitLab, we are driven by results and deadlines and even an IPO is no exception to the rule. The route we chose to go—traditional or direct listing—was another topic. \n\nDirect listings are historically uncommon. 
It wasn’t until [Spotify went public via a direct listing](https://techcrunch.com/2018/02/28/spotify-has-filed-to-go-public/) in 2018 that there was even a precedent for tech companies. Now, Slack and potentially Airbnb are rumored to be next, officially [making direct listings *a thing*](https://www.bloomberg.com/opinion/articles/2019-01-11/direct-listings-are-a-thing-now). As for GitLab, like everything else, it will come down to what’s right for us. I can report in good faith all options are being examined carefully and closely. The takeaway here is that while some might think it’s crazy of GitLab to set this ambitious goal, and Vinod might think it’s crazy to set a specific date, one thing is for sure: As a company, we’re ready and already thinking about what’s next. \n\n### Growth curves \nYou know you’re at a successful company when the VCs aren’t focused on how you’re going to meet your short-term goals or current [product vision](/direction/#vision) but are excited about the long-term vision. \n\n“Where is GitLab five years from now?” Vinod asked the room, as he stood up and drew a chart with three staggered S curves on it. He pointed to the first one, “This is where you are now.” Pointing to the other two he asked, “what comes next?” \n\nHe explained how Square started as a payment device, then to a point-of-sale system, found success, and instead of stagnating, entered into a new market via their Square Capital and Cash App offerings. You see similar trajectory with Facebook entering into the devices space with their newest Portal system. What was it going to be for GitLab? It seemed like an outrageous question to ask considering we still have this huge vision to complete but, unsurprisingly, Sid had thought about this and some things are already in the making. 
\n\n#### Growth curve #1: Meltano\n\n[Meltano](/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you/) (model, extract, load, transform, analyze, notebook, orchestrate) is a start-up within GitLab aimed at becoming a complete solution for data teams. Similar in concept to GitLab, the goal is to create a single application for the entire data science lifecycle. The mission is similar as well: Make analytics accessible to everyone. If successful, Meltano will begin to bridge the gap between systems and data and bring the GitLab vision of everyone can contribute to even more people. \n\n#### Growth curve #2: Product assisted digital transformation  \n\nThe next idea was a real show stopper: product assisted digital transformation. Think code review as a service but expanded to culture, infrastructure, management, pipelines, process, and integrated directly into the product instead of being an outside service. Imagine if you could bootstrap and up-level your engineering teams’ skills with a product that comes with engineering best practices and support out-of-the-box. \n\n## Safe for another year\n\nAs it turns out, agenda-less meetings at Khosla Ventures can provide a ton of value. We walked out of Khosla’s office with a healthy dose of validation and criticism, and our brains buzzing with new horizons of potential to explore. I was already convinced GitLab is a great company to work for, but my experience at Khosla opened my eyes to just how unique our opportunity is. And, the on-site, half day, agenda-less meeting is good for another year. 
\n\nCover image by [Reza Rostampisheh](https://unsplash.com/@rezarp) on [Unsplash](https://unsplash.com/photos/-hcCm0kIaSg)\n{: .note}\n",[9,873,832],{"slug":4534,"featured":6,"template":680},"khosla-ventures-gitlab-meeting","content:en-us:blog:khosla-ventures-gitlab-meeting.yml","Khosla Ventures Gitlab Meeting","en-us/blog/khosla-ventures-gitlab-meeting.yml","en-us/blog/khosla-ventures-gitlab-meeting",{"_path":4540,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4541,"content":4547,"config":4552,"_id":4554,"_type":14,"title":4555,"_source":16,"_file":4556,"_stem":4557,"_extension":19},"/en-us/blog/learn-gitlab-devops-version-control",{"title":4542,"description":4543,"ogTitle":4542,"ogDescription":4543,"noIndex":6,"ogImage":4544,"ogUrl":4545,"ogSiteName":667,"ogType":668,"canonicalUrls":4545,"schema":4546},"GitLab tutorials for secure pipelines, Kubernetes, and more at Learn@GitLab","Learn@GitLab offers videos and self-driven demos so you can get the most out of GitLab at your own pace.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667236/Blog/Hero%20Images/Learn-at-GL.jpg","https://about.gitlab.com/blog/learn-gitlab-devops-version-control","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab tutorials for secure pipelines, Kubernetes, and more at Learn@GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2021-01-12\",\n      }",{"title":4542,"description":4543,"authors":4548,"heroImage":4544,"date":4549,"body":4550,"category":743,"tags":4551},[1877],"2021-01-12","\nAt GitLab, we often say that it's not what you know, it's knowing where to look. 
But sometimes, finding answers isn’t so easy.\n\nAn autonomous, [self-service](/company/culture/all-remote/self-service/#proactive-approach-to-answering-questions), self-learning, and self-searching mindset is when you operate with the idea that your question has already been answered – somewhere. But we realized that for people interested in GitLab, or even those using GitLab, learning **how** to use it wasn’t always easy to find.\n\nWhile we stress the importance of having a [single source of truth](https://handbook.gitlab.com/handbook/values/#single-source-of-truth), we realized that when it came to learning about GitLab, there were almost too many places to look. We have [GitLab University](https://docs.gitlab.com/ee/index.html), our official [GitLab](https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg) and [GitLab Unfiltered](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A) YouTube pages where we regularly upload educational content, and of course, the [docs](https://docs.gitlab.com/). We needed to find a way to consolidate self-education and make it more intuitive.\n\n## What is Learn@GitLab?\n\n[Learn@GitLab](/learn/) is a learning portal where anyone can go to find self-driven demos and videos about using GitLab. Rather than just making Learn@GitLab _one more resource_, we’re iterating on this idea and consolidating our educational content so that it’s self-driven and easy to find.\n\nThe goal for Learn@GitLab is to present high quality, and accessible technical content that is easy to find on our website to help prospects and users educate themselves about GitLab. This content will include educational technical videos, as well as simulation/click-through demos, and tutorials. 
The content is organized by common topics such as [DevOps Platform](/solutions/devops-platform/), [version control](/topics/version-control/) and collaboration, and continuous integration, to name a few.\n\nWe’ve picked three of our favorite videos/tutorials for you to get a quick introduction to Learn@GitLab.\n\n## The benefits of a single DevOps platform\n\nWhen we talk about the benefits of GitLab, we often talk about how it saves time and how the single application reduces toolchain complexity. But what does that mean in the context of an ordinary toolchain using tools like GitHub, Jenkins, Jira, etc.?\n\nIn this super short video, we break down a typical toolchain according to three criteria: Integrations needed, clicks, and screen switches. How many times do you need to context switch for a simple task? We break it down for you.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/MNxkyLrA5Aw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Adding security to your GitLab CI/CD pipeline\n\nGitLab helps teams go from DevOps to DevSecOps. One of the ways we help is by allowing you to check your application for security vulnerabilities in your CI/CD pipelines that may lead to unauthorized access, data leaks, denial of services, or worse. 
GitLab reports these vulnerabilities in the merge request so you can fix them before they ever reach end users.\n\nThis quick video guides you through setting up and configuring GitLab security features, and setting up approval rules for merge requests.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Fd5DhebtScg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab developer flow with Kubernetes\n\nIf you’re a developer, or even just managing a team of developers, you might want to see what a typical workflow would be like using GitLab. If you’re using [Kubernetes](/solutions/kubernetes/), seeing how GitLab works within a deployment environment is especially important.\n\nIn this technical demo, we use Amazon EKS as the deployment environment. We go over creating GitLab issues, merge requests, how to use Auto DevOps pipeline templates, review apps, advanced deployment techniques, and staging and production rollout – all in **just 15 minutes.**\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/TMQziI2VDbQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWhile we’ll continue to have educational content in other places on our site (and will continue to update them), Learn@GitLab will act as a front door for self education that is no more than two clicks from our homepage. With this new learning portal, we hope to teach people what problems GitLab can solve, but more importantly, show step-by-step _how_ GitLab solves them.\n\nFeel free to explore the different learning paths and comment below if you have any suggestions. 
Everyone can contribute.\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\n[Go to Learn@GitLab](/learn/)!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n\nCover image by [Benjamin Davies](https://unsplash.com/@bendavisual?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/learn?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[9,231,1294],{"slug":4553,"featured":6,"template":680},"learn-gitlab-devops-version-control","content:en-us:blog:learn-gitlab-devops-version-control.yml","Learn Gitlab Devops Version Control","en-us/blog/learn-gitlab-devops-version-control.yml","en-us/blog/learn-gitlab-devops-version-control",{"_path":4559,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4560,"content":4566,"config":4571,"_id":4573,"_type":14,"title":4574,"_source":16,"_file":4575,"_stem":4576,"_extension":19},"/en-us/blog/lessons-in-iteration-from-new-infrastructure-team",{"title":4561,"description":4562,"ogTitle":4561,"ogDescription":4562,"noIndex":6,"ogImage":4563,"ogUrl":4564,"ogSiteName":667,"ogType":668,"canonicalUrls":4564,"schema":4565},"Lessons in iteration from a new team in infrastructure","A new, small team at GitLab discovered that minimum viable change applies to scaling problems too.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681724/Blog/Hero%20Images/skateboard-iteration.jpg","https://about.gitlab.com/blog/lessons-in-iteration-from-new-infrastructure-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lessons in iteration from a new team in infrastructure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2020-11-09\",\n      
}",{"title":4561,"description":4562,"authors":4567,"heroImage":4563,"date":4568,"body":4569,"category":743,"tags":4570},[4083],"2020-11-09","\n\nThe [Scalability Team][scalability] has the goal of understanding\npotential scaling bottlenecks in our application. We formed a year ago\nwith one person, and as of early 2020, we are made up of three backend\nengineers, plus one site reliability engineer. We are a\nsort of [program team] so we have a wide remit, and there's only one\nsimilar team at GitLab: our sibling [Delivery Team][delivery]. All of\nthe backend engineers in the team (including me) came from\nworking on product development rather than infrastructure work.\n\n[scalability]: /handbook/engineering/infrastructure/team/scalability/\n[program team]: https://lethain.com/programs-owning-the-unownable/\n[delivery]: /handbook/engineering/infrastructure/team/delivery/\n\nWe recently finished a project where we [investigated our use of\nSidekiq][sidekiq] and made various improvements. We decided to continue\nthe same approach of looking at services, and got started with our next\ntarget of Redis. Here are some lessons we took away:\n\n[sidekiq]:/blog/scaling-our-use-of-sidekiq/\n\n## 1. Don't lose sight of what matters most: impact\n\nWe chose to split our work on Redis into three phases:\n\n1. [Visibility][v]: increase visibility into the service.\n2. [Triage][t]: use our increased visibility to look for problems and\n   potential improvements, and triage those.\n3. [Knowledge sharing][ks]: share what we learned with the rest of the\n   Engineering department.\n\n[v]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/309\n[t]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/309\n[ks]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/265\n\n[Iteration] is crucial at GitLab, so much so that we have regular\n[Iteration Office Hours]. 
On the surface, you could say that we were\niterating here: our issues were small and well-scoped and we were\ndelivering code to production regularly.\n\n[Iteration]: https://handbook.gitlab.com/handbook/values/#iteration\n[Iteration Office Hours]: /handbook/ceo/#iteration-office-hours\n\nThe problem, as it turned out, was that we were focused so heavily on\nunderstanding the service, that we lost track of the [results] we were\ntrying to deliver. Our [values hierarchy] puts results at the top, but\nwe hadn't given the results enough attention. We are a small team that\nneeds to cover a wide area, and we need to deliver _impactful_ changes.\n\n[results]: https://handbook.gitlab.com/handbook/values/#results\n[values hierarchy]: https://handbook.gitlab.com/handbook/values/#hierarchy\n\nThere are some [examples in our handbook][impact] – which we've added as\na result of this project – but we define impact as either having a\ndirect effect on the platform, our infrastructure, or our development\nteams. That was what was missing here, because the impact was loaded\ntowards the very end of the project: largely in the knowledge sharing\nsection.\n\n[impact]: /handbook/engineering/infrastructure/team/scalability/#impact\n\nWe spent a long time (several months) improving our visibility, which\ndefinitely has a positive impact on our SREs who spend time\ninvestigating incidents. But we could have delivered this value and more\nin a shorter time period, if we had kept clear sights on the impact we\nwanted to have.\n\n## 2. Minimum viable change applies to scaling problems too\n\nWith that framing in mind, it's quite clear that we weren't iterating in\nthe best way. To use a famous example, it's like we'd started building a\ncar by building the wheels, then the chassis, etc. That takes a long\ntime to get something useful. We could have started by [building a\nskateboard]. 
We didn't have a good sense of what a [minimum viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc)\nwas for our team, so we got it wrong.\n\n[building a skateboard]: https://blog.crisp.se/2016/01/25/henrikkniberg/making-sense-of-mvp\n\n![Building a skateboard iteration](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-skateboard.png){: .medium.center}\nIllustration by [Henrik Kniberg](https://blog.crisp.se/2016/01/25/henrikkniberg/making-sense-of-mvp)\n{: .note.text-right}\n\nWhat would a minimum viable change look like? When we worked on this project, we\ncovered several topics: adding Redis calls to our standard structured\nlogs, exposing slow log information, and so on. With hindsight, the best\nway would probably be to slice the project differently. We could take\nthe three steps above (visibility, triage, knowledge sharing), but\nconsider them all to be necessary for a project on a single topic with a\ntangible goal.\n\nWe did this, with all the impact at the end:\n\n![Working through the first step for all topics, the second step for all topics, and finally having impact in the third step](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-before.jpg)\n\nBut traveling in the other direction would have been much more\neffective:\n\n![Working through all steps for the first topic, having impact, then starting again at the second topic](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-after.jpg)\n\nThis leads to a state where:\n\n1. The impact we make is clearer.\n2. We start making an impact sooner.\n3. We can re-assess after every project, and stop early once we have\n   done enough.\n\nThe sooner we have this impact, the sooner we can see the results of\nwhat we've done. It's also good for morale to see these results on a\nregular basis!\n\n## 3. 
Shape your projects to deliver impact throughout\n\nThe way that we originally structured our work to improve Redis usage made it harder to see\nour impact than it should have been. For example, we [updated our\ndevelopment documentation][dev-docs-update] at the end of the project.\nThis was useful, but it would have been much more useful to backend\nengineers if we'd updated the documentation along the way, so they always had the best information we could give them.\n\n[dev-docs-update]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/41889\n\nFor a more positive example: in the visibility stage, we created\na couple of issues directly for stage groups to address, rather than\nwaiting for the triage or knowledge sharing stage to do so. One of those\nissues was about [large cache entries for merge request\ndiscussions][mr-cache]. By getting this in front of the relevant\ndevelopment team earlier, we were able to\nget the fix scheduled and completed sooner as well.\n\n[mr-cache]: https://gitlab.com/gitlab-org/gitlab/-/issues/225600\n\nRegularly delivering projects with clear impact means that we get\nfeedback earlier (from engineers in Development and Infrastructure, or\nfrom the infrastructure itself), we can cover a wider area in less time,\nand we are happier about the work we're doing.\n\nAs people who went from working directly on user-facing features to\nworking on a property of the system as a whole, we learned that we can\nstill set ourselves an MVC to keep us on the right path, as long as we\nthink carefully about the results we want to achieve.\n\n[Cover image](https://unsplash.com/@viniciusamano?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) by shawn henry on [Unsplash](https://unsplash.com/s/photos/skateboard?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText)\n{: 
.note}\n",[9,1295,723],{"slug":4572,"featured":6,"template":680},"lessons-in-iteration-from-new-infrastructure-team","content:en-us:blog:lessons-in-iteration-from-new-infrastructure-team.yml","Lessons In Iteration From New Infrastructure Team","en-us/blog/lessons-in-iteration-from-new-infrastructure-team.yml","en-us/blog/lessons-in-iteration-from-new-infrastructure-team",{"_path":4578,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4579,"content":4585,"config":4591,"_id":4593,"_type":14,"title":4594,"_source":16,"_file":4595,"_stem":4596,"_extension":19},"/en-us/blog/lessons-on-building-a-distributed-company",{"title":4580,"description":4581,"ogTitle":4580,"ogDescription":4581,"noIndex":6,"ogImage":4582,"ogUrl":4583,"ogSiteName":667,"ogType":668,"canonicalUrls":4583,"schema":4584},"9 Lessons on building a distributed company","GitLab CEO Sid Sijbrandij and Outklip Founder Sunil Kowlgi talk about remote hiring, management, customer support, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678641/Blog/Hero%20Images/lessons-building-distributed-company.jpg","https://about.gitlab.com/blog/lessons-on-building-a-distributed-company","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"9 Lessons on building a distributed company\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sunil Kowlgi\"}],\n        \"datePublished\": \"2019-04-18\",\n      }",{"title":4580,"description":4581,"authors":4586,"heroImage":4582,"date":4588,"body":4589,"category":808,"tags":4590},[4587],"Sunil Kowlgi","2019-04-18","\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your brain](/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or a discussion of other things related to GitLab._\n\nIt is far easier to run an all-remote company than one that’s a hybrid of remote and colocated,\nsays [Sid 
Sijbrandij](/company/team/#sytses). When a company adopts a colocated\nculture there’s less recording of things and fewer digital artifacts, so it’s going to be hard for\nthe rest of the company to figure out how decisions are made.\n\nI interviewed Sid for lessons on building a distributed company like GitLab. Sid answered\nquestions on topics ranging from hiring to customer support.\n\nMy top takeaways from the interview:\n\n### 1. Remote interviews are more convenient than in-person interviews\n\nDuring an in-person interview, you need to make sure all your interview materials are loaded\nbeforehand on your laptop or iPad. It’s also going to be hard navigating things on your computer\nwhile talking to a person in front of you. You might write down notes that you’ll need to\ndigitize later by scanning, which is redundant work. On the other hand, when interviewing\nsomeone remotely over a video conference, you have all the materials at hand.\nBecause you’re looking at a screen you can look up information online and quickly take notes without interruption.\n\n### 2. Spend more time on the candidate’s questions than on your questions\n\nDuring interviews, you can get a lot of information about the candidate from the questions\nthey come prepared with and their follow-on questions. When Sid interviews, he spends most of the interview on the candidate’s questions.\n\n### 3. It is really important to write things down\n\nPeople are very efficient at reading things. If you write something down you can refer to it,\nso you don’t have to say everything again. In order to have alignment in a distributed company,\nrepetition of goals and strategy is needed. Repetition is easier when you have one writeup and people are able to easily find it.\n\n### 4. 
Google Docs is superior to a whiteboard\n\nIt is quite common to have meetings where everyone is looking at the same thing.\nBut, because of time zone differences, it’s hard to involve everyone in a meeting.\nWhile whiteboards are commonly used in in-person meetings, they’re not missed that much by remote workers.\nGoogle Docs is superior to a whiteboard because you never run out of space, you can use\nnumbered lists and indentation, and people can view them afterwards.\n\n### 5. Cross-functional teams don’t work well\n\nGitLab doesn’t do cross-functional teams. Teams are composed of people that perform a similar role.\nA team manager is someone who has experience with that role. This way the manager is able\nto assess results, coach, and give career advice, which is very important.\n\n### 6. Focus on the output of employees, not the input\n\nGood remote workers are focused on results. Especially for managers, it’s important that they\ndon’t focus on the input of people – how long they worked or things like that – but rather focus on the output.\nFocus on the input is not healthy in any company, but especially with remote work you have to let it go.\nNo one’s looking over your shoulder to check whether you’re on Facebook or not, and it’s fine if you\nare as long as you deliver the work to a reasonable degree.\n\n### 7. To be a good manager, you have to quickly identify and remedy underperformance\n\nGitLab hires people who are capable of being [managers of one](https://handbook.gitlab.com/handbook/values/#managers-of-one). But in instances where someone\nis underperforming, managers have to identify it, have a conversation, and take remedial action.\nHere’s [GitLab’s process for dealing with underperformance](/handbook/leadership/underperformance/).\n\n### 8. Be quick with recognition\n\nGitLab has various kinds of employee recognition. For quick recognition, there’s a #thanks\nchannel on Slack where people can celebrate their colleagues’ work. 
There are also $1,000\ndiscretionary bonuses and GitLab tends to be very high velocity with those.\nRecognizing employees and doing it quickly is really important.\n\n### 9. Put customer-reported issues on a level playing field with internally reported issues\n\nThe issue tracking process in GitLab doesn’t distinguish whether the issue reporter is a user,\n a customer, or a team member. If an issue comes from a user or customer, it’s probably\nbecause they care a lot about what you’re building. So, every feature request, everything\nGitLab team-members work on is out there on a level playing field. GitLab tends to have a lot more\ninteraction with customers than other companies.\n\nWatch the full interview below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pDU8lxh1-6U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n[Visit this page to read the transcript of the interview](https://outklip.com/blog/gitlab-building-a-distributed-company/).\n\n### About the guest author\n\nSunil Kowlgi is the founder of [Outklip](https://outklip.com), a video platform for remote work.\n\nPhoto by [Brett Zeck](https://unsplash.com/photos/eyfMgGvo9PA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/globe?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,745,832,873],{"slug":4592,"featured":6,"template":680},"lessons-on-building-a-distributed-company","content:en-us:blog:lessons-on-building-a-distributed-company.yml","Lessons On Building A Distributed 
Company","en-us/blog/lessons-on-building-a-distributed-company.yml","en-us/blog/lessons-on-building-a-distributed-company",{"_path":4598,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4599,"content":4605,"config":4611,"_id":4613,"_type":14,"title":4614,"_source":16,"_file":4615,"_stem":4616,"_extension":19},"/en-us/blog/let-s-talk-swag",{"title":4600,"description":4601,"ogTitle":4600,"ogDescription":4601,"noIndex":6,"ogImage":4602,"ogUrl":4603,"ogSiteName":667,"ogType":668,"canonicalUrls":4603,"schema":4604},"Let's talk swag","The GitLab swag store is live with all new swag goodies, and we've got a 25% code to share.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671738/Blog/Hero%20Images/swag-cover.jpg","https://about.gitlab.com/blog/let-s-talk-swag","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Let's talk swag\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily Kyle\"}],\n        \"datePublished\": \"2017-08-04\",\n      }",{"title":4600,"description":4601,"authors":4606,"heroImage":4602,"date":4608,"body":4609,"category":299,"tags":4610},[4607],"Emily Kyle","2017-08-04","\n\nEXTRA! EXTRA! This just in, GitLab just launched a new [swag store](https://shop.gitlab.com/) filled with all types of new goodies. And…for the first time ever, a few select items that you could only lay your hands on at GitLab events will now be available in the store. AKA everyone can have their very own pair of GitLab socks and hand-knit tanuki plush toy.\n\n\u003C!-- more -->\n\nNew items never available before include hoodies, pajama pants, speakers, hats, flasks, and notebooks. We couldn't be more excited to share all these new items with the community, so for a limited time we will be offering a 25% discount (**NEWSWAG**) off everything in the store, with free shipping to all countries. Yes, I said FREE. 
So, now is the best time to get yourself outfitted with all the GitLab gear you can handle. Technically, you could be dressed head to toe in tanuki wear.\n\n![Tanuki toes](https://about.gitlab.com/images/blogimages/swag-store-socks.jpg){: .shadow}\n\nBut wait, there's more! **For every 25 people who [retweet this announcement](https://twitter.com/gitlab/status/893396098114437121), we will pull one winner to receive a new GitLab hoodie.**  \n\nHere are some of our favorite swaggerific photos from the past year to inspire your GitLab shopping adventure:\n\n![Best of swag](https://about.gitlab.com/images/blogimages/best-of-swag.jpg){: .shadow}\n\nLastly, as our motto states, everyone can contribute — so, we're always open to hearing your [new swag suggestions](https://gitlab.com/gitlab-com/swag_suggestions)!\n\n#### Update August 11th\n\nDue to the extreme amount of orders we got after the store launch, we had to briefly pause our store for the past two days. Everything's back up and running now! Enjoy!\n",[9],{"slug":4612,"featured":6,"template":680},"let-s-talk-swag","content:en-us:blog:let-s-talk-swag.yml","Let S Talk Swag","en-us/blog/let-s-talk-swag.yml","en-us/blog/let-s-talk-swag",{"_path":4618,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4619,"content":4625,"config":4631,"_id":4633,"_type":14,"title":4634,"_source":16,"_file":4635,"_stem":4636,"_extension":19},"/en-us/blog/live-from-commit-london",{"title":4620,"description":4621,"ogTitle":4620,"ogDescription":4621,"noIndex":6,"ogImage":4622,"ogUrl":4623,"ogSiteName":667,"ogType":668,"canonicalUrls":4623,"schema":4624},"Live from Commit London","We're having a packed day at our first European user conference. 
Watch this space for the latest news.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678442/Blog/Hero%20Images/londoncommit.png","https://about.gitlab.com/blog/live-from-commit-london","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Live from Commit London\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-10-09\",\n      }",{"title":4620,"description":4621,"authors":4626,"heroImage":4622,"date":4627,"body":4628,"category":299,"tags":4629},[869],"2019-10-09","\n**9:30AM BST** – GitLab CEO [Sid Sijbrandij](/company/team/#sytses) told attendees at our first European user conference that support for Amazon Web Services' Elastic Kubernetes Service (EKS) will be available later this year. Sid also underscored the importance of the European market. Almost one-third of GitLab's business comes from Europe and 42% of our customers are based in Europe.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">Gitlab Commit London warming up with breakfast networking 🤜🏻💥🚀 cc \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/gitlabcommit?src=hash&amp;ref_src=twsrc%5Etfw\">#gitlabcommit\u003C/a> \u003Ca href=\"https://t.co/ke7nsNE7pO\">pic.twitter.com/ke7nsNE7pO\u003C/a>\u003C/p>&mdash; James McLeod (@mcleo_d) \u003Ca href=\"https://twitter.com/mcleo_d/status/1181849833604337667?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n**10:00AM BST** – Speed matters, particularly at Porsche AG. 
Software engineers Alberto Gisbert and Dennis Menge told Commit 2019 attendees how a quest to improve collaboration, reduce tool complexity and achieve a single source of truth led the car manufacturer to GitLab. Porsche started using GitLab in Europe initially, but quickly realized it needed to expand to China, Porsche's largest market, as well. One year into the project, Porsche has more than 660 repositories with more than 250 active users. All told, more than 80,000 pipelines have been triggered.\n\nUp next, Capgemini UK's [Matt Smith](https://twitter.com/Harmelodic) shared how to go from [Zero to K8s: As Fast As Possible](https://gitlabcommit2019london.sched.com/event/UL5X/zero-to-k8s-as-fast-as-possible):\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\">\u003Cp lang=\"en\" dir=\"ltr\">Britney mic&#39;d up!\u003Cbr>\u003Cbr>On stage in half an hour 😬\u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> \u003Ca href=\"https://t.co/ivQ1V9waBW\">pic.twitter.com/ivQ1V9waBW\u003C/a>\u003C/p>&mdash; Matt Smith (@Harmelodic) \u003Ca href=\"https://twitter.com/Harmelodic/status/1181851029048102912?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nCoding in the blink of an eye!\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">.\u003Ca href=\"https://twitter.com/Harmelodic?ref_src=twsrc%5Etfw\">@Harmelodic\u003C/a> is talking faster than \u003Ca href=\"https://twitter.com/hashtag/terraform?src=hash&amp;ref_src=twsrc%5Etfw\">#terraform\u003C/a> can deploy things :joy: Great live coding :sunglasses: \u003Ca 
href=\"https://twitter.com/hashtag/gitlabcommit?src=hash&amp;ref_src=twsrc%5Etfw\">#gitlabcommit\u003C/a> \u003Ca href=\"https://t.co/LS0t3GdqHx\">pic.twitter.com/LS0t3GdqHx\u003C/a>\u003C/p>&mdash; Michael Friedrich (@dnsmichi) \u003Ca href=\"https://twitter.com/dnsmichi/status/1181862263680053248?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n**11:30AM BST** – How to shift left and bring security more firmly into development was the topic of a mid-morning panel discussion at Commit.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\">\u003Cp lang=\"en\" dir=\"ltr\">\u003Ca href=\"https://twitter.com/Shetti?ref_src=twsrc%5Etfw\">@Shetti\u003C/a> of \u003Ca href=\"https://twitter.com/VMware?ref_src=twsrc%5Etfw\">@VMware\u003C/a> leads a panel discussion on security in the software development life cycle with Jeremy Guido, \u003Ca href=\"https://twitter.com/plafoucriere?ref_src=twsrc%5Etfw\">@plafoucriere\u003C/a> and \u003Ca href=\"https://twitter.com/simasotiris?ref_src=twsrc%5Etfw\">@simasotiris\u003C/a>.\u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womenintech?src=hash&amp;ref_src=twsrc%5Etfw\">#womenintech\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womeninstem?src=hash&amp;ref_src=twsrc%5Etfw\">#womeninstem\u003C/a> \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womenwhocode?src=hash&amp;ref_src=twsrc%5Etfw\">#womenwhocode\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/git?src=hash&amp;ref_src=twsrc%5Etfw\">#git\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/DevOps?src=hash&amp;ref_src=twsrc%5Etfw\">#DevOps\u003C/a> \u003Ca 
href=\"https://twitter.com/hashtag/opensource?src=hash&amp;ref_src=twsrc%5Etfw\">#opensource\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/security?src=hash&amp;ref_src=twsrc%5Etfw\">#security\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/sdlc?src=hash&amp;ref_src=twsrc%5Etfw\">#sdlc\u003C/a> \u003Ca href=\"https://t.co/lQeQYelTVv\">pic.twitter.com/lQeQYelTVv\u003C/a>\u003C/p>&mdash; Suze Shardlow at #GitLabCommit (@SuzeShardlow) \u003Ca href=\"https://twitter.com/SuzeShardlow/status/1181874495268773888?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nSotiraki Sima, executive director in technology risk at Goldman Sachs, stressed the benefits of starting small and being prepared to continually adapt to new technologies and new tools. [Jeremy Guido](https://fr.linkedin.com/in/jeremyguido), backend engineer with My Data Models, said designating a security leader in a development team can help to make everyone feel more like a stakeholder. And [Philippe Lafoucriere](https://about.gitlab.com/company/team/#plafoucriere), distinguished engineer at GitLab, stressed the role of automation in scaling security throughout the SDLC. The bottom line: it's a process so take it a step at a time.\n\n**1:00PM BST** – What's next for the GitLab tool? [Eric Brinkman](/company/team/#ebrinkman), director of product, dev products, outlined our technology roadmap. He began with Meltano, a six-person startup located within GitLab that is focused on bringing DevOps best practices to DataOps. Eric announced that today [version 1.0 of Meltano](https://meltano.com/blog/meltano-graduates-to-version-1-0/) is available.\n\nAnd that was just the beginning. Value stream management will be coming soon to Manage, Eric said, so users will be able to track efficiency metrics and ultimately receive recommendations. 
Plan stage will add high and low release requirements related to code and test. In Create, our source code management and code review will get an upgrade with an improved Web IDE and eventually the ability to do live coding. Verify will receive load testing runs by default and Secure will get [fuzzing](/direction/secure/dynamic-analysis/fuzz-testing/) as a built-in part of security testing. Changes to Release will mean automatically staged rollbacks and Configure will invest in run books to improve mean time to recovery. Protect will continue to invest in real-time threat detection capabilities. And finally auto remediation is on the horizon so at some point the largely manual (and often annoying) job of finding and fixing vulnerabilities will be a thing of the past. \"This is something that can truly bring dev, sec and ops together,\" Eric said.\n\nNote: All sessions from Commit London are being recorded and will be available on our [YouTube channel](https://youtube.com/gitlab) in 24-48 hours.\n{: .alert.alert-info}\n",[267,277,9,675,4630,2749],"user stories",{"slug":4632,"featured":6,"template":680},"live-from-commit-london","content:en-us:blog:live-from-commit-london.yml","Live From Commit London","en-us/blog/live-from-commit-london.yml","en-us/blog/live-from-commit-london",{"_path":4638,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4639,"content":4644,"config":4649,"_id":4651,"_type":14,"title":4652,"_source":16,"_file":4653,"_stem":4654,"_extension":19},"/en-us/blog/living-for-more-than-the-weekend",{"title":4640,"description":4641,"ogTitle":4640,"ogDescription":4641,"noIndex":6,"ogImage":690,"ogUrl":4642,"ogSiteName":667,"ogType":668,"canonicalUrls":4642,"schema":4643},"My remote story: Living for more than the weekend","A quick look into what drove me to choose an organization that lives and breathes it's core values and allowed me the flexibility to be a full time member of my 
family","https://about.gitlab.com/blog/living-for-more-than-the-weekend","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"My remote story: Living for more than the weekend\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"JD Alex\"}],\n        \"datePublished\": \"2020-04-30\",\n      }",{"title":4640,"description":4641,"authors":4645,"heroImage":690,"date":1067,"body":4647,"category":698,"tags":4648},[4646],"JD Alex","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nWhat exactly does it mean to be remote? For some, it looks the same as working in a traditional office space, 9-5, glued to your computer, albeit in a different location. Maybe it is from your home office, your living room, or a co-working space. To me, working remotely means something much more. It is an opportunity to live my life how I see fit.\n\u003Cbr>\n\nI grew up with a single mother. She was and still is a nurse, subject to schedules that can be quite hectic. There were many days where I found myself heating up food my mother prepped for me before she went to work, and at times I would go with her and spend the night in the doctor’s lounge at the hospital. I don’t regret or wish anything different about those times — I loved them — but I want something different for my family, and for my son.\n\u003Cbr>\n\nI previously worked in the Bay Area for a corporate wellness company and the grind was real. I never turned off. I was attached to my phone answering calls and texts from our CEO at all hours of the day. I commuted roughly 12 miles and it could take me up to an hour to get home from work (for 2 years that is 21.75 days PER YEAR spent commuting). After a couple of years, my wife and I decided a change was needed. Fast forward to my time at my next position in Denver, Colorado where I had a shorter commute (on most days), though I still sat in an office for 50 hours a week. 
Sure, free food, coffee and ping pong tables
In 17 months, I have been given the opportunity, the gift, of sitting down with my family for every single meal. I have  enjoyed more meals with my son in these last 17 months than some get to enjoy with their kids in 17 years.\n\u003Cbr>\n\nGitLab works asynchronously and employees are valued based on their results, not their input.GitLab understands as an organization that people need time off to gather their thoughts, re-center themselves, go for a bike ride or walk, take care of household activities, or pick kids up from school or your pets from doggy day care. Giving each and every employee the tools they need and the trust they deserve to work from anywhere that Wi-Fi allows.\n\u003Cbr>\n\nSo why do I choose remote? For me, it’s simple. I have a life, and my career is a part of that life. It is a part that I truly enjoy and also that provides for my family. A part that I do not take for granted, but it is still only a part of my life. It is not who I am, and it is not what I will be remembered for by my son. I wish to live my life every single day, and not just for the weekends. I want to be present for my family, my friends, and for myself. I want to see the world and I want to see what's in my backyard. 
I am able to do this because of what GitLab and remote work has offered.\n\n![Family photo](https://about.gitlab.com/images/blogimages/unfilteredblogpostmyremotestory.png){: .shadow}\n\nThe Alex's.\n{: .note.text-center}\n",[810,267,9],{"slug":4650,"featured":6,"template":680},"living-for-more-than-the-weekend","content:en-us:blog:living-for-more-than-the-weekend.yml","Living For More Than The Weekend","en-us/blog/living-for-more-than-the-weekend.yml","en-us/blog/living-for-more-than-the-weekend",{"_path":4656,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4657,"content":4663,"config":4668,"_id":4670,"_type":14,"title":4671,"_source":16,"_file":4672,"_stem":4673,"_extension":19},"/en-us/blog/low-code-no-code",{"title":4658,"description":4659,"ogTitle":4658,"ogDescription":4659,"noIndex":6,"ogImage":4660,"ogUrl":4661,"ogSiteName":667,"ogType":668,"canonicalUrls":4661,"schema":4662},"The role low code app development tools may play at GitLab","Citizen developers are creating code without being coders. 
CEO Sid Sijbrandij and senior PMM Parker Ennis explain the impact of low code app development tools on GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681170/Blog/Hero%20Images/lowcodenocode.jpg","https://about.gitlab.com/blog/low-code-no-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The role low code app development tools may play at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-03-26\",\n      }",{"title":4658,"description":4659,"authors":4664,"heroImage":4660,"date":4665,"body":4666,"category":787,"tags":4667},[869],"2020-03-26","\n\nIf software is eating the world and there is a [worldwide shortage of software developers](https://www.icims.com/hiring-insights/for-employers/how-to-win-tech-talent), how can companies stay in the game?\n\nOne answer: The [citizen developer](https://www.forbes.com/sites/johneverhard/2019/01/22/the-pros-and-cons-of-citizen-development/#2376328184fd). Empowered by technology, the so-called citizen developer is able to create code without a formal developer background. Two types of tools allow this: Low code app development tools let a citizen developer build apps using only the most rudimentary of coding skills, while no-code solutions are generally WYSIWYG choices that allow someone to create an app, or part of an app, using pre-assembled pieces of code.\n\nLow code and no code tools have been available for a long time – 4GL, computer-assisted software engineering (CASE) and rapid application development (RAD) tools were all precursors – and according to [IDC](http://www.idc.com), today their use is on the rise. In fact out of 23.4 million developers worldwide in 2019, IDC said 1.76 million of them are low coders, representing 7.5% of the total. 
There were also 810,000 no-code developers worldwide last year, according to IDC’s Market Perspective: Low-Code and No-Code Developer Census, 2019: Growth Begins in Earnest report.\n\nGiven the growing popularity, it’s not surprising the GitLab development team is taking a hard look at [how to leverage and/or integrate low code functionality](https://gitlab.com/groups/gitlab-org/-/epics/2353#note_263252013) into our tool. Recently CEO [Sid Sijbrandij](/company/team/#sytses) sat down with senior product marketing manager [Parker Ennis](/company/team/#parker_ennis) to talk about the role low code solutions can and should play at Gitlab.\n\n“So what I like about low code is the potential to have more people programming,” Sid tells Parker. And Parker is definitely enthusiastic as well. “What interests me in low code is providing the ease of getting into something like coding,” he explains. “There’s a high barrier of entry when it comes to programming. I found that first hand when I was an undergrad trying to learn Ruby on Rails. It was an intimidating, tough experience but for other people it’s something innate inside them. One of the really cool benefits of low code is you can have people starting to learn how to code without the intimidating factor.”\n\nAlso there’s no question there are simply not enough people with coding skills to fill the demand for software, Parker says, pointing to data from industry analyst and blogger [James Governor](https://redmonk.com/jgovernor/author/jgovernor) who says the world will need around 100 million developers in 10 years. Remember, we’re at just one quarter of that today.\n\nParker is particularly excited about the potential of low code tools to get kids interested in programming at an early age. “How can we educate the next generation in how to solve the problems we are creating today?” he asks. 
“Low code is a viable option.”\n\nMeanwhile today at GitLab we’re looking at ways we can make it easier to integrate low code tools into our workflow, Parker says. We might go further than that if a viable open source low-code tool arrives on the market.\n\n**Learn more about app develompent tools:**\n\n[Unify your logs and metrics](/blog/unifylogsmetrics/)\n\n[Get the most out of performance testing](/blog/how-were-building-up-performance-testing-of-gitlab/)\n\n[Up your merge train game](/blog/all-aboard-merge-trains/)\n\nCover image by [Anas Alshanti](https://unsplash.com/@otenteko) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[231,9,723],{"slug":4669,"featured":6,"template":680},"low-code-no-code","content:en-us:blog:low-code-no-code.yml","Low Code No Code","en-us/blog/low-code-no-code.yml","en-us/blog/low-code-no-code",{"_path":4675,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4676,"content":4681,"config":4687,"_id":4689,"_type":14,"title":4690,"_source":16,"_file":4691,"_stem":4692,"_extension":19},"/en-us/blog/making-remote-internships-successful",{"title":4677,"description":4678,"ogTitle":4677,"ogDescription":4678,"noIndex":6,"ogImage":4387,"ogUrl":4679,"ogSiteName":667,"ogType":668,"canonicalUrls":4679,"schema":4680},"How to make remote internships successful","Support Engineering Manager Lee Matos talks about pitfalls and successes in making remote internships work.","https://about.gitlab.com/blog/making-remote-internships-successful","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make remote internships successful\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Matos\"}],\n        \"datePublished\": \"2018-08-16\",\n      }",{"title":4677,"description":4678,"authors":4682,"heroImage":4387,"date":4684,"body":4685,"category":299,"tags":4686},[4683],"Lee Matos","2018-08-16","\nBack in December I introduced you to [Support Engineering 
at GitLab](/blog/support-engineering-at-gitlab/). Now I'm excited to talk about my experiences – good and bad – with remote internships. I think remote internships can be a great thing but not without pitfalls. Let's dive in.\n\nAs I started to lead the GitLab Support team, [Collen](/company/team/#collenkriel), our first Support Engineering intern, was wrapping up his internship. We started to spend some time together when I realized Collen was doing great work, but we didn't have a clear definition of what it took to transition out of “intern” to “Junior.” This was not due to lack of management, it was because Collen was the first. We had never even thought about what it would look like to graduate! Lesson number 1:\n\n## 1. Clearly define success\n\nInternships are challenging when you don't know what you want the internship to be about, or what you want it to accomplish. I think it's vital that everyone involved knows what success is, and how close they are to it. It took a lot of time and effort for me and Collen to figure out what we'd mark as success. That made it even more stressful as we were both scrambling to make clear and actionable markers of success as his internship came to a close. It was a sign of Collen's skill and grace that we managed to define and execute those things with a ticking clock counting down. Once we knew what success was, Collen knocked it out of the park. Now, success is different for every team and person. Keep that in mind as you define it here for yourself, and your intern.\n\n### A second chance\n\nA few months later, we had an opportunity to hire [Chenje](/company/team/#ckatanda) as an intern and my number one goal was to improve that experience. For Chenje, he had a lot of drive and a few technical projects under his belt, but lacked experience with working in technical teams. 
We settled on three tasks as the definition of success for Chenje's internship:\n\n+ Deploy Omnibus HA and improve Documentation\n+ Pair on 25 ticket sessions\n+ Gain expertise in one or two expert subjects\n\nFor Chenje, success was defined as completing two of the three defined tasks. This gave him some freedom to plan and schedule, and even room to fail in the face of challenging tasks. This was important because it was meaningful work, but it was also important as a manager that I can understand how team members approach problems big and small.\n\n## 2. Set expectations\n\nSome of this advice is good for any internship – not just a remote one. But one of the unique challenges of a remote internship is the lack of facetime and potential delays in communication. Both Collen and Chenje are six hours ahead of me, so the time difference was definitely a factor here. With remote work, a lot of the inefficiencies of communication and workflow that are just accepted as part of office life are exposed. There's nowhere to hide.\n\n>With remote work, a lot of the inefficiencies of communication and workflow that are just accepted as part of office life are exposed\n\nIn addition to other internship challenges, we now add the element of time coordination, and knowing that your reports can't just walk over to your desk with a question. We have to be very explicit about connecting to make meaningful change happen. There's a tendency to want that to happen synchronously, but we have to figure out alternatives.\n\nI think setting the expectation that the intern should be ready and willing to ask questions was important. Instead of waiting for you to come rescue them, they'll also need to take initiative to snag time on your calendar if they're blocked, and on your end you need to make that time to help them out. With remote work you have to be willing to step forward; you can't wait on someone else to give you tasks or to check in if everything is going smoothly. 
It won't work at GitLab, and probably won't fly at other remote companies either.\n\n## 3. Avoid busywork\n\nI also made it clear to Chenje that I would not be giving him busywork and that he'd be able to make real contributions. One of the advantages of a remote internship is that there's no coffee to fetch, so busywork possibilities are limited. If you're managing an intern properly, you should consider them to be 1.5x an ordinary report. I thought about the things that I wanted to do but couldn't focus on and offered those to Chenje. I wanted to give him challenges that would result in work he could be proud of. If you're considering an intern to deal with the things you don't want to do, then you should reconsider. That's a recipe for a bad internship, and your intern won't want to work with your team afterwards.\n\n>If you're managing an intern properly, you should consider them to be 1.5x an ordinary report\n\nYour intern should be someone who you believe to be capable and competent, just missing experience. The dream of an internship is that you're developing somebody who will end up working for your organization. If you're not doing it for that reason, then what's the point?\n\n## 4. But don't throw them in the deep end either\n\nWe didn't push either Collen or Chenje to jump into interacting with customers straight away, to give them time to build up their comfort level, experience, and confidence. The initial goal was that the internship is skill-building period – a safe space. You don't want to overwhelm your intern by making them do everything. They're an intern for a reason.\n\n>The initial goal was that the internship is skill-building period – a safe space\n\n## 5. Give clear feedback on progress\n\nAs an intern, Chenje had full access to the team and myself as a lead. We have weekly 1:1s and we'd review his progress. 
Now, Collen, our first intern, had regular 1:1s with me, but because we didn't have a clear structure of the internship, we weren't using this time to its full potential. Being able to use our 1:1 time to understand and help Chenje overcome blockers and organize made his internship incredibly smooth. We knew what success was, we regularly tracked it, and we learned how to communicate it to each other.\n\nI'm extremely proud of the work that Collen and Chenje have done on our team and how they continue to excel in the face of two very different internship experiences. If you are running a remote team, or considering interns, these things helped me turn something that started out stressful into a recipe for success.\n",[810,9,832],{"slug":4688,"featured":6,"template":680},"making-remote-internships-successful","content:en-us:blog:making-remote-internships-successful.yml","Making Remote Internships Successful","en-us/blog/making-remote-internships-successful.yml","en-us/blog/making-remote-internships-successful",{"_path":4694,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4695,"content":4701,"config":4707,"_id":4709,"_type":14,"title":4710,"_source":16,"_file":4711,"_stem":4712,"_extension":19},"/en-us/blog/manage-conversation-staying-agile",{"title":4696,"description":4697,"ogTitle":4696,"ogDescription":4697,"noIndex":6,"ogImage":4698,"ogUrl":4699,"ogSiteName":667,"ogType":668,"canonicalUrls":4699,"schema":4700},"5 Ways to stay agile in a growing organization","Some of the GitLab Manage team have a conversation about staying agile as a company grows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678597/Blog/Hero%20Images/run-agile-in-gitlab.jpg","https://about.gitlab.com/blog/manage-conversation-staying-agile","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Ways to stay agile in a growing organization\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Jeremy Watson\"}],\n        \"datePublished\": \"2019-06-10\",\n      }",{"title":4696,"description":4697,"authors":4702,"heroImage":4698,"date":4704,"body":4705,"category":787,"tags":4706},[4703],"Jeremy Watson","2019-06-10","\nSome of us on GitLab's [Manage team](/handbook/engineering/development/dev/manage/) had a discussion a while back about the challenges of staying agile while a company scales. In true GitLab style, the [discussion took place asynchronously via an issue](https://gitlab.com/gitlab-org/manage/issues/13). Here it is:\n\n## How do you stay agile in a growing organization?\n\n### 1. Make quick, but thoughtful decisions\n\n[Jeremy, product manager](/company/team/#gitJeremy): This is the fundamental thing that allows startups to be competitive against dominant players in a market: It's using your resources more efficiently and moving faster than anyone else.\n\nTo me, two primary characteristics that support agility are making quick but thoughtful decisions, and focus. I think Amazon is a great example of the first, and I like [Amazon's simple Type-1/Type-2 framework](https://www.forbes.com/sites/eriklarson/2018/09/24/how-jeff-bezos-uses-faster-better-decisions-to-keep-amazon-innovating/#5feb716c7a65) for identifying the Type 2 decisions that are easily reversed, and allowing the threshold of approval to be relatively low.\n\nAs companies grow, it feels like the perceived number of Type 1 decisions grows in turn – and the organization slows down as more decision layers emerge. One thing I love about GitLab is that we're still dedicated to moving quickly and we're not constantly asking for permission to make things better. If it's easily revertible and makes something better, ship it. In all honesty, I think this is one of our biggest competitive advantages.\n\n### 2. 
Hire the right people\n\n[Liam, engineering manager](/company/team/#lmcandrew): The interesting thing here is that lots of organizations (big and small) now realize the value of Agile ways of working (admittedly, many of which do agile but aren't agile), making it less of a competitive advantage and more like table stakes. Therefore, I think of Agile as the sensible (only?) choice when it comes to delivering your own product to customers. An Agile mentality lets you deliver incremental, low-risk value to customers, allowing you to get feedback or pivot with minimal investment.\n\nI think the single most important thing for me here is hiring – hiring the right people who truly understand the value of agile ways of working.\n\nOne of the statements in [GitLab's Efficiency value](https://handbook.gitlab.com/handbook/values/#efficiency) points out a particular behavior that is so important here:\n\n> Accept mistakes: Not every problem should lead to a new process to prevent them. Additional processes make all actions more inefficient and a mistake only affects one.\n\nAs an organization grows its headcount, the number of business processes invariably grows with it. It's very easy to add process as a knee-jerk reaction to a problem or because it makes you feel more confident in something being executed. Having a team question the value of new processes and perhaps ask \"What do we lose by introducing this process?\" is vital to keep agility.\n\n### 3. Keep teams small and focused\n\n[Jeremy](/company/team/#gitJeremy): I don't know if I agree that as an organization grows that business processes invariably grow as well. This is what I meant earlier when I mentioned focus; without smaller teams focused on problems they own, interests start to compete and decision-making slows down because more people have a stake in the outcome.\n\nYou can mitigate this with small, focused teams. 
This is harder in monolithic codebases with lots of dependencies between teams.\n\nI do agree that hiring is critical to ensure everyone is questioning the status quo. The default answer to new process should be \"no,\" unless there's some acute pain it alleviates.\n\n[Luke Bennett, frontend engineer](/company/team/#__lukebennett): It is hard to avoid the reduction in velocity as a single team grows beyond some unknown threshold. A \"single team\" is a group of humans (or robots I suppose) making informed decisions about a cross-section of a product. As the team grows, it typically means the number of issues is already growing. There are more people accountable for those issues, more people making decisions on those issues, and more people contributing to those issues.\n\nIn software it also leads to team members working \"at the same workbench\" too often and of course makes the job of managing the team harder; even hosting a productive team call or keeping in touch with team members can become a challenge. This can easily lead to inefficient hierarchies to \"patch\" the problem, which can seem like a simple short cut compared to getting **more** Agile.\n\nFrom my own experience, splitting a large team into smaller ones instantly provides a feeling of relief for team members. Of course it's not just about the size of the team, it's also about their responsibilities/scope. Team members desperately want to be contributing meaningful changes on time and a reduction in scope lets them focus again on a more specific cross-section of the product, shifting attention away from the larger team discussions that may not be specific to a product area. Put simply, a discussion between [Manage](/stages-devops-lifecycle/) product category members of 10 people will be much more product-focused than a Frontend discussion of 20 people. You can expect their contributions to be the same. 
Additionally, the chance to build a stronger connection and appreciation for your team members is not to be ignored. There are definitely productivity gains when everyone is on the same raft!\n\n>Team members desperately want to be contributing meaningful changes on time and a reduction in scope lets them focus again on a more specific cross-section of the product, shifting attention away from the larger team discussions that may not be specific to a product area.\n\nI feel like this is a natural behaviour of humans. Agile feels natural to me at least and historically people never seem to work too well in very large groups. In the UK at least, we often reference the proverb \"Too many cooks spoil the broth.\" It's a little more complex and less brutal than that in software development, but it stands.\n\nThat said, avoiding large teams can lead to more problems. It reminds me of [Amdahl's law](https://en.wikipedia.org/wiki/Amdahl%27s_law) in that when you create more Agile teams, you create management overhead to orchestrate the direction of the teams. Agile with small teams is relatively simple because this effect is negligible, but as you scale your Agile organisation, you have to start paying attention to it.\n\n### 4. Allow teams to experiment with their own processes\n\n[Sanad Liaquat, senior test automation engineer](/company/team/#sanadliaquat): To me, keeping the size of teams small and focused on specific areas with well defined scope/boundaries is very important to stay agile in a growing organization. Also, the team should be allowed to discover their own processes and evolve. This works very well when the organization has teams laboring on separate projects with separate codebases. Each Agile team/project can then share what works best for them with other teams which can decide to adopt the practice or not. 
When projects have dependencies on each other, it is important that there be effective coordination on release timings between teams.\n\nWith organizations such as GitLab, where there is a single codebase, teams having their own process is not pragmatic. I believe GitLab handles Agile very well by dividing the organization into 2D slices of teams (Frontend, Backend, Security, Quality, etc.) and groups (Plan, Manage, Create, etc.) and having well-defined processes shared across groups. I believe it is necessary to keep an eye on the size of the group and think about breaking it down if it grows beyond what is considered a small and effective Agile group. (How small is \"small\" would be a separate discussion.)\n\n[Jeremy](/company/team/#gitJeremy): Yeah, I agree that small teams are pretty key. Sanad brought up dependencies, which is really important. You can have small teams, but if they can't operate independently you'll lose all your velocity.\n\nIt's interesting that you say that teams at GitLab don't have their own processes, because it feels like our teams DO have their own processes. We have some standardization like release cadence (monthly on the 22nd) and some labels (Deliverable), but we're free to do our own thing.\n\nWe operate differently than [Plan](/handbook/engineering/development/dev/plan/) and [Create:Source Code](/handbook/engineering/development/dev/create/source-code-be/), for instance. Plan uses the \"due-22nd\" label to split the work into two-week chunks, and Create:Source Code still estimates issues individually. I think it's a strength that we can individually experiment, but why isn't this more of a problem?\n\nI do think that different teams have different needs. I feel like some processes work better for other teams – maybe based on the personalities of the people or the engineering/product maturity of that particular stage.\n\nI don't know if we've really asked \"why\" or documented what's worked and what hasn't. 
I'm sure individual teams have experimented a lot, but I wonder if we're missing out by not tracking and sharing some of the things we've tried.\n\n[Sanad](/company/team/#sanadliaquat): I was not aware of other teams within GitLab having different processes like the ones Jeremy mentioned. I do agree that some processes can differ within teams and it is a strength that allows a team to experiment on their own and evolve as they deem fit for themselves. However, when working on the same codebase, it is better (or unavoidable) for teams to have uniformity on things like the code review process, testing strategies, documentation standards, etc.\n\n### 5. Make sure everyone is on the same page\n\n[Martin Wortschack, senior frontend engineer](/company/team/#wortschi): I also want to emphasize how important it is for an organization's leadership to understand what \"Agile\" means and that it's not just another fancy buzzword. It requires change. Depending on the organization it could mean anything including introducing new processes, hiring the right people, etc.  Therefore it's very important that everyone involved has the same expectations and common understanding of \"staying Agile\" (or \"becoming Agile\") and understands the necessary steps that need to be taken towards being an Agile organization. The best talent won't be able to change much if their decisions are not backed by the executives. 
I've seen a lot of companies that would consider themselves an \"Agile organization\" just because they have set up a Jira project.\n\nSo, to me the most important thing is everybody's ability and willingness to change.\n\n_If you'd like to see more of these discussions around other topics, please let us know in the comments below or in [the original issue](https://gitlab.com/gitlab-org/manage/issues/13)._\n\n[Photo](https://unsplash.com/photos/HSXv_s2gH3U?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Andrew McElroy on [Unsplash](https://unsplash.com/search/photos/sprint?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[831,811,9],{"slug":4708,"featured":6,"template":680},"manage-conversation-staying-agile","content:en-us:blog:manage-conversation-staying-agile.yml","Manage Conversation Staying Agile","en-us/blog/manage-conversation-staying-agile.yml","en-us/blog/manage-conversation-staying-agile",{"_path":4714,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4715,"content":4720,"config":4725,"_id":4727,"_type":14,"title":4728,"_source":16,"_file":4729,"_stem":4730,"_extension":19},"/en-us/blog/manager-training",{"title":4716,"description":4717,"ogTitle":4716,"ogDescription":4717,"noIndex":6,"ogImage":690,"ogUrl":4718,"ogSiteName":667,"ogType":668,"canonicalUrls":4718,"schema":4719},"Building an All-Remote Management Enablement Program","How to build an all-remote management training & enablement program for the future of work.","https://about.gitlab.com/blog/manager-training","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building an All-Remote Management Enablement Program\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Josh Zimmerman\"}],\n        \"datePublished\": \"2021-02-19\",\n      
}",{"title":4716,"description":4717,"authors":4721,"heroImage":690,"date":4722,"body":4723,"category":698,"tags":4724},[1247],"2021-02-19","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nOne of GitLab Learning & Development’s (L&D) biggest charters for FY21 was building out a management training program. It was a huge task! The CEO asked the L&D team to build a program that trained managers on remote leadership, managing teams, and management best practices. GitLab has been around since 2011. With our massive growth over the years, there was a huge need to train and develop managers for the future. Building a program from scratch was going to require a proactive approach to ensure all voices were heard and to build a program that equipped our leaders with the right skills. \n\nSo, how do you build a management training program for an all-remote company? What do you include? How do you design and develop an impactful program? \n\nIn this blog, I’ll cover some tips and tricks to what we did in L&D to build the [Manager Challenge](https://about.gitlab.com/handbook/people-group/learning-and-development/manager-challenge/) program. \n\n### Start With a Learning Needs Analysis\n\nWhen I first started at GitLab, I learned that there had never been a formal management training program. L&D was a relatively new function within the organization. With the massive growth, L&D saw an opportunity to train our managers for the skillset they needed to be successful. Our first task for developing a program was to conduct a learning needs analysis. We took a consulting approach to the analysis by interviewing a wide range of stakeholders at the company with varying experience levels. From C-suite executives to new managers, to established Directors, we had to diversify who we would receive input from. \n\nWe divided between us, at the time a team of two, by collecting feedback on the management needs and skill gaps. 
We conducted a job task analysis by determining what managers do at GitLab and what knowledge and skills they would need. During the interviews, we identified consistent themes across stakeholder groups. Some of the themes mentioned “foundational management” as a critical area to focus skill building. Many of our people leaders had been recently promoted and never managed a team before. The skills needed to manage people are different when you have direct reports versus being an individual contributor. \n\nFrom the learning needs analysis, we could pull out additional themes and recommendations for the training. Managing an all-remote team requires a different set of skills than a colocated office environment. For one, people leaders need to ensure their people are set up to be “[Managers of One](https://about.gitlab.com/handbook/leadership/#managers-of-one).” You have to empower your people to work autonomously and get the job done to achieve results. We synthesized the themes which led us into the storyboarding and training design phase. \n\n### Design a Training Experience That Fits Your Culture\n\nEveryone is super busy at GitLab. Like any high-growth, pre-IPO organization, the company moves at lightning speed. We knew that managers would have limited time to dedicate to training. L&D didn’t want to make managers take huge chunks out of their day to dedicate to training. And there is nothing worse than being on a three to five-hour-long virtual training event! \n\nThe training was divided into two parts: \n1. Daily asynchronous learning activities \n2. Weekly live learning sessions\n\nWe knew that the training needed to be bite-sized over a period of time to reinforce management behaviors and skill-building. When we started designing the program, we looked at 30-day challenges as a framework to support behavior change. Participants would be required to do a short daily challenge that would take twenty minutes to complete on their own time. 
GitLab is a global company. Our team members live in over 65+ countries around the globe. Coordinating calendars with managers was going to be difficult for dedicated virtual live training time. Instead, we built the curriculum by dividing up themes and topics into weeks and days. We created bite-sized learning and actions for participants to complete on their own time. \n\nAt GitLab, we don’t just read off of slides during a presentation. We ask that participants review slides ahead of the call and use the time together to ask questions while facilitating a discussion. We designed the live learning sessions with these best practices in mind. The live learning sessions would focus on the themes covered during the daily asynchronous activities. Also, we prompted managers to openly discuss specific management topics (i.e., giving/receiving feedback, performance discussions, wellbeing check-ins, etc.) that are important to GitLab. \n\nThe program design started to take shape. We designed a three-week program with asynchronous learning activities to be completed Monday-Wednesday. Thursday’s were dedicated to live-learning events to network and learn from other managers. Friday’s served as catch up days, weekly course evaluations, and self-reflections.\n\n### Use What You Have Available \n\nThe best way to understand how GitLab works is to use it for as much of your job as possible. We [dogfood](https://about.gitlab.com/handbook/product/product-processes/#dogfood-everything) our product by threading it into everything we do in the organization. Managers need to be well-equipped with using GitLab to manage their all-remote team. We designed the training to incorporate GitLab into the curriculum as much as possible. The daily asynchronous learning activities are posted in a [GitLab Issue](https://docs.gitlab.com/ee/user/project/issues/). Everyone in the program, anyone with a GitLab.com account, has access to the learning content. The asynchronous topic was posted daily. 
Participants could read through the Issue and complete the action item by posting their responses in the comments section. \n\nThe practice enabled our [transparency value](https://handbook.gitlab.com/handbook/values/#transparency) by allowing all participants (anyone really) to review manager’s responses. The benefit of using GitLab reinforced multiple behaviors. One, everyone was dogfooding our product. Two, participants could learn from others by reading how other managers respond to different situations. Three, participants now have a reference point to go back to as they grow in their careers. \n\nDoes your organization have a tool like GitLab to help facilitate L&D initiatives? If so, consider using it to reinforce behaviors and to allow managers to become comfortable using them. If not, consider having your team members sign up for a free [GitLab account](https://gitlab.com/) and [implement a challenge](https://about.gitlab.com/handbook/people-group/learning-and-development/manager-challenge/#learning-and-development-team) using GitLab.  \n\n### Apply Social Learning \n\nRemote work can have some drawbacks. One of those challenges may be a lack of connection with your coworkers. Managers need to form relationships with their team members over virtual calls. And people leaders may not have a lot of opportunities to learn from others in a social setting. When you work for a globally distributed team, there can be isolation if the rest of your team is in different time zones. \n\nWe designed the live learning session as a forum for social learning. Managers were given prompts and scenarios on certain situations they would face in their role. Breakout activities were implemented to strengthen networks and collaboration. Participants would share tips on how they would handle the scenarios. We focused less on slides and presenting material and more on engaging with one another to learn from others. 
Managers shared lessons learned, and many participants walked away from the live learning sessions with new skills to apply right away on the job. \n\n### Review and Validate the Program with Executives\n\nWe are lucky that our leadership team is passionate about the growth and development of our team members. GitLab’s CEO, Sid, asked us to spearhead management training, and he partnered with us on reviewing the content to ensure it aligned with his vision. High Output Management is a book written by Andrew Grove, former CEO of Intel. It is one of our CEO’s favorite books!\n\nWhen we met with Sid for the first time to review the curriculum, he wanted us to ensure that important principles covered in the book were included. We threaded multiple topics (i.e., 1-1 meetings, performance management, making decisions, etc.) into the program. \n\nAlso, our executive review meetings validated whether or not the program reinforced our values. [Gitlab Values](https://handbook.gitlab.com/handbook/values/) are central to how the organization operates. I’ve never worked for a company where they are emphasized so much! Executives had a keen eye on ensuring that the program equipped managers with being role models of our values. The review and validation from executives were vital as we launched GitLab’s management training program. \n\n### Don’t be afraid to Iterate\n\nIt’s easy for L&D professionals to get caught up in requirement gathering and rapidly develop learning programs. However, it’s important to remember that your solution’s best feedback will occur once you pilot the program. We’ve launched two iterations of the Manager Challenge program, and the two looked completely different. The first program was longer, four weeks, and didn’t do enough to reinforce GitLab Values. We also held several meetings with leadership to thread more GitLab “ways of working” content into the curriculum. 
We ended up cutting out one of the weeks of training to make it three weeks and used the book High Output Management as the foundation to the enablement. \n\nFor the first iteration, we created a large project plan. We didn’t start with the [smallest thing possible and get it out as quickly as possible](https://about.gitlab.com/blog/behind-the-scenes-how-we-built-review-apps/). The plan allowed us to develop a comprehensive curriculum, but it was without testing. The upfront work took a great deal of time. Looking back, we should have developed a shorter program, iterated, and moved forward with the next version. To be successful, we had to get something out right away, pilot, receive feedback, and update. \n\nDuring the training, we conducted weekly evaluations of the content. With the feedback, we were able to apply constructive points and incorporate them into the next week. For example, participants wanted to network more. So we adapted the curriculum and added more social learning in the remaining weeks. \n\nIteration was central to how we rolled out a more seamless program that incorporated GitLab Values and ways-of-working. Don’t be afraid to iterate if you are building a management training program. The best feedback will come once you get it out the door. \n\n### The Result\n\n\nAfter months of planning, content development, stakeholder reviews, we developed the Manager Challenge program for GitLab people leaders. The program is a blended learning approach that incorporates self-paced daily challenges and live learning sessions to build foundational management skills. The program includes leadership assessments, interactive learning, networking, and digital learning, all in three weeks. The program builds a set of baseline management skills that complement our values. \n\nHere’s what a few participants had to say about the program: \n1. 
\"The handbook has so much content, it's easy to forget how much tactical information can be found right at your fingertips.\"\n2. \"Team performance is cyclical. Perceived regressions aren't bad, but rather a reflection of a change in team dynamics. Look for the types of questions people are asking to know how to respond.\"\n3. \"The handbook is a great resource with tons of information on being a manager, having hard conversations, and helping teams grow.\"\n4. \"For me, these are good reminders of what are the best practices to adopt as a Manager. I am always exploring what are ways we can do tasks better and faster. With that said, as a manager, we need to be sure my people and others are part of the process.\"\n5. \"I learned that there are so many amazing managers here at GitLab. Each of the days' comments were treasure troves into how to approach something differently or new techniques that others have found success with.\"\n6. \"It's possible to be a great remote manager!\"\n\nIf you are set out to create a management training program for your organization to develop leaders, use some of the points in this blog as a reference point. Feel free to reach out to GitLab Learning & Development at `learning@gitlab.com`. 
\n\n### Looking for more Learning and Development material from GitLab?\n\nIf you want to learn more about what the Learning and Development team at GitLab is up to, check out our [handbook page](/handbook/people-group/learning-and-development/) or read our past newsletters.\n",[832,9,811],{"slug":4726,"featured":6,"template":680},"manager-training","content:en-us:blog:manager-training.yml","Manager Training","en-us/blog/manager-training.yml","en-us/blog/manager-training",{"_path":4732,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4733,"content":4739,"config":4745,"_id":4747,"_type":14,"title":4748,"_source":16,"_file":4749,"_stem":4750,"_extension":19},"/en-us/blog/managing-global-projects-requiring-rapid-response-continuously",{"title":4734,"description":4735,"ogTitle":4734,"ogDescription":4735,"noIndex":6,"ogImage":4736,"ogUrl":4737,"ogSiteName":667,"ogType":668,"canonicalUrls":4737,"schema":4738},"How to leverage distributed engineering teams for rapid response","Rapid response issues can be handled in a compressed time frame if distributed engineering teams can work continuously. Here's what we've learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681331/Blog/Hero%20Images/all-remote-world-banner-1920x1080.png","https://about.gitlab.com/blog/managing-global-projects-requiring-rapid-response-continuously","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage distributed engineering teams for rapid response\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Baus\"}],\n        \"datePublished\": \"2021-06-04\",\n      }",{"title":4734,"description":4735,"authors":4740,"heroImage":4736,"date":4742,"body":4743,"category":743,"tags":4744},[4741],"Chris Baus","2021-06-04","\n\nI am an [Engineering Manager](https://gitlab.com/chris_baus) working on a distributed engineering team at GitLab. 
[Our team](/handbook/engineering/development/fulfillment/purchase/) is distributed globally, and we have engineers working in India, Germany, Australia, New Zealand, and the United States. I am [located](https://www.google.com/maps/place/Stateline,+NV/) in the U.S. in Pacific Standard Time (PST). In coordination with [other](/handbook/engineering/development/ops/verify/#verifycontinuous-integration) globally distributed engineering teams, we recently responded to an [abuse issue](/blog/prevent-crypto-mining-abuse/) which was causing disruptions for legitimate GitLab.com users, and required a [rapid response](/handbook/engineering/workflow/#rapid-engineering-response).\n\n## Global distribution as an advantage\n\nMany managers view global team distribution as a constraint (because synchronous communication becomes more difficult), but it is possible to [embrace the constraint](https://basecamp.com/gettingreal/03.4-embrace-constraints) and turn it into an advantage. When teams are globally distributed it is possible for work to continue around-the-clock, uninterrupted, and decrease the overall delivery time of projects. I refer to this as \"continuous development.\"\n\nWhile we don't typically work this way, when problems are pressing, working continuously can be a strategy to advance the delivery time frame. In this case, two engineers from our team worked on the problem [17](https://www.google.com/maps/place/Bellingham,+WA/) [hours](https://www.google.com/maps/place/Melbourne+VIC,+Australia/) apart. 
This provided some overlap in the afternoon (PST), but for the most part, the engineers were working on the project at different times which allowed work to progress continuously.\n\nIt requires some extra management compared to the typical workflow, but the effort may be worth the investment if time is critical.\n\n## Define clear handoffs\n\nOne risk of multiple engineers working continuously and [asynchronously](https://baus.net/embrace-asynchronous-work/) is duplicating work from lack of clear separation of work or handoffs. If possible, it is best to separate work, so engineers are working in different areas of code, but separating work might not always be feasible or practical. In either case, when an engineer finishes working for the day, they should provide an update describing the work which was completed, any problems impeding progress, and what is left to be done.\n\nIf engineers are working in the same area of code, it should be clearly defined if they are working in the same branch or separate branches. If they are working in the same branch, it might make sense for one engineer to maintain branch and accept merges from other engineers before it merged into the main development branch.\n\n## Agree on interfaces\n\nWhen distributed engineering teams are working on a project, it is critical to define clear and documented interfaces between systems and components. System interfaces should be documented in a centrally maintained location. If there is a need to change the interface, then everyone affected by the change should be notified.\n\nIn retrospect, we lost nearly a day of testing because of confusion about an interface between the frontend and backend of the system. These types of problems tend to be amplified when not all engineers involved in the project are available at the same time, as it may take an entire 24-hour cycle to handle and communicate changes. 
When a discrepancy is found, the problem should be documented by the engineers currently working and, if possible, a solution proposed.\n\n## Place synchronous communication on management\n\nWhen working concurrently, to help ensure all teams are on the same path, it can be helpful to discuss the project status synchronously. This can be difficult to arrange with distributed engineering teams. On this project, the technical teams met twice weekly for 15-30 minutes. It can be tempting to require team members to work off hours to attend synchronous meetings. I'd recommend fighting this tendency.\n\nIt's the responsibility of a manager to ensure effective communication across teams. During rapid-response actions, it's helpful to keep flexible working hours to synchronize with team members across different time zones. I accept working outside my typical hours (knowing I can [adjust my hours](/company/culture/all-remote/non-linear-workday/) at other times of the day), to communicate the status of my team synchronously. This also requires the manager to have a more detailed technical understanding of the implementation and status than is normally required, so they can speak on behalf of offline team members.\n\nInstead of requiring synchronous meeting attendance, [take good notes](/company/culture/all-remote/meetings/#document-everything-live-yes-everything) and [record the meeting](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A) so team members in other time zones can review the status and decisions from synchronous meetings.\n\n## Trade-offs\n\nIn many ways, engineering is the art of balancing trade-offs. 
Operating in a continuous, globally-distributed fashion takes more management and cognitive overhead than typical asynchronous workflows, but when time is a priority, it could decrease the release time on critical projects.\n\nOperating continuously may come at cost of other management tasks as compressing time increases the effort required to oversee the project requiring a [rapid response](/handbook/engineering/workflow/#rapid-engineering-response). At the end of the rapid-response issue, a retrospective should be held to determine if the engineering strategy provided the expected results, relative to the increased overhead. My recommendation is to be realistic about costs when planning continuous development even when it provides short-term results.\n\n_Read more on [leading engineering teams](/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers/)._\n",[832,9,831,811],{"slug":4746,"featured":6,"template":680},"managing-global-projects-requiring-rapid-response-continuously","content:en-us:blog:managing-global-projects-requiring-rapid-response-continuously.yml","Managing Global Projects Requiring Rapid Response Continuously","en-us/blog/managing-global-projects-requiring-rapid-response-continuously.yml","en-us/blog/managing-global-projects-requiring-rapid-response-continuously",{"_path":4752,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4753,"content":4759,"config":4765,"_id":4767,"_type":14,"title":4768,"_source":16,"_file":4769,"_stem":4770,"_extension":19},"/en-us/blog/mastering-the-all-remote-environment",{"title":4754,"description":4755,"ogTitle":4754,"ogDescription":4755,"noIndex":6,"ogImage":4756,"ogUrl":4757,"ogSiteName":667,"ogType":668,"canonicalUrls":4757,"schema":4758},"Mastering the all-remote environment: My top 5 challenges and solutions","Unlocking potential and overcoming challenges in an all-remote 
environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673105/Blog/Hero%20Images/joshua-tree-desert-sunset.jpg","https://about.gitlab.com/blog/mastering-the-all-remote-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mastering the all-remote environment: My top 5 challenges and solutions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Shawn Winters\"}],\n        \"datePublished\": \"2019-12-30\",\n      }",{"title":4754,"description":4755,"authors":4760,"heroImage":4756,"date":4762,"body":4763,"category":808,"tags":4764},[4761],"Shawn Winters","2019-12-30","\nSince joining GitLab in late 2018, I’ve experienced a whirlwind of excitement, travel, and continuous change. While GitLab provides the [flexibility](/company/culture/all-remote/benefits/) I always wanted in a career, functioning within an all-remote organization has its [challenges](/company/culture/all-remote/drawbacks/). I’m highlighting these, along with solutions I’ve discovered and engineered, in hopes of helping others who are new to remote work.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/QTPeyRW766Q\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIn this [GitLab Unfiltered video](https://youtu.be/QTPeyRW766Q) above, I sit down with Darren Murph to talk about working in an all-remote setting, providing a glimpse at what's possible by embracing this style of work.\n{: .note.text-center}\n\n## The lack of physical human interaction\n\nTurns out, I crave human interaction. The buzz of being around others gives me energy. 
Although small talk and watercooler banter can be distracting at times, social interaction is (overall) a calming and rewarding experience for me.\n\nI overcame this by leveraging technology like Slack and Zoom to [constantly communicate](/company/culture/all-remote/informal-communication/) with my colleagues. I’m surprised by how well these tools simulate the effect of being in the office. In fact, video calls oftentimes add an element of intimacy not found in the office, as I’m frequently able to visit a colleague’s home, coworking space, or favorite workplace. This allows [a more authentic connection](/blog/tips-for-mastering-video-calls/) than what’s typically brought into a colocated office setting.\n\n## Questioning my productivity\n\nI struggled early on without the social validation that comes with working in an office. At GitLab, team members are given autonomy to be a [manager of one](https://handbook.gitlab.com/handbook/values/#managers-of-one), which can take time to fully embrace and appreciate.\n\nTo overcome this, I was intentional about defining what a solid day’s work looked like in my role. I asked myself what things I should aim to accomplish each day, no matter what, to be productive based on [goals and objectives](/company/okrs/) that applied to me. This produced a sense of freedom I had not experienced before, and relieved a mental burden. It also allowed me to spend additional time with family and enjoying hobbies.\n\n## Grappling with a chaotic, overloaded calendar\n\nMeetings are a necessary evil in some instances, but GitLab [views them very differently](/company/culture/all-remote/meetings/) than most organizations. It is important to stay on top of what the company is doing, while also making sure you're up-to-date on your particular business unit. 
I looked for ways to bridge this gap given that there are no hallway conversations in an all-remote setting.\n\nDespite GitLab’s bias towards [asynchronous communication](/blog/remote-communication/), I still found the quantity of meetings on my calendar to be overwhelming. I felt like I had no time to get my actual job done. As I acclimated to all-remote life, I realized that every meeting was recorded. This allowed me to go back and listen to important meetings during downtime.\n\nI also embraced the reality that many GitLab meetings are optional. Once I understood which meetings were vital to my success, and which were helpful for my knowledge of how the company was operating, I was able to use meetings to my advantage rather than being at the mercy of an overloaded schedule.\n\n## Can you _really_ document everything?\n\nGitLab is a huge proponent of documenting everything in our [company handbook](/handbook/about/#count-handbook-pages). In a typical office setting, there are people around to answer every question. Here, I’m encouraged to search for information first – to see if my question has already been answered and documented – which was a major challenge for me early on. My instinct was to ask someone instead of searching in the handbook, and I realized that part of this stemmed from my desire to take any excuse to socialize with colleagues.\n\nI overcame this by retraining myself and flipping an old habit on its head. If I was unable to find an answer in the handbook, I was not only empowered to seek answers from others, but also to use a [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/#merge-requests) to document the solution and help others.\n\n## Turning my video on\n\nGitLab conducts all meetings – internal and external – using a video conferencing platform. With no offices, we lean on video calls to maintain human contact. 
As participants in a video conference, we voluntarily enable a face-to-face interaction with a person (or persons) on the other side, which requires some level of courage and humility. Initially, this was a challenge for me. I was very uncomfortable turning my video on, routinely concerned with my appearance, my surroundings, and my background.\n\nI overcame this challenge by embracing GitLab’s reminder that [meetings are about the work, not the background](/company/culture/all-remote/meetings/#meetings-are-about-the-work-not-the-background). By being vulnerable, I learned that bringing my genuine self to a video call enabled me to build stronger relationships with colleagues and prospects. Now, I make it my goal to have my video turned on as much as possible. This has helped me overcome my fear of being self-conscious, while allowing me to engage with more people in a meaningful way.\n\nAs more companies embrace all-remote, it’s important for us to collectively discuss challenges and solutions with one another. 
We're interested in hearing about challenges faced by others implementing remote work, so we can ideally find and document solutions.\n\nLearn more about [requesting a Pick Your Brain interview on all-remote](/company/culture/all-remote/pick-your-brain/)!\n\n*Cover image by [Darren Murph](https://twitter.com/darrenmurph).*\n",[677,9,832],{"slug":4766,"featured":6,"template":680},"mastering-the-all-remote-environment","content:en-us:blog:mastering-the-all-remote-environment.yml","Mastering The All Remote Environment","en-us/blog/mastering-the-all-remote-environment.yml","en-us/blog/mastering-the-all-remote-environment",{"_path":4772,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4773,"content":4779,"config":4784,"_id":4786,"_type":14,"title":4787,"_source":16,"_file":4788,"_stem":4789,"_extension":19},"/en-us/blog/measuring-engineering-productivity-at-gitlab",{"title":4774,"description":4775,"ogTitle":4774,"ogDescription":4775,"noIndex":6,"ogImage":4776,"ogUrl":4777,"ogSiteName":667,"ogType":668,"canonicalUrls":4777,"schema":4778},"How we measure engineering productivity at GitLab","Learn about how we measure and iterate through this metric","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681533/Blog/Hero%20Images/background.jpg","https://about.gitlab.com/blog/measuring-engineering-productivity-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we measure engineering productivity at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Clement Ho\"}],\n        \"datePublished\": \"2020-08-27\",\n      }",{"title":4774,"description":4775,"authors":4780,"heroImage":4776,"date":990,"body":4782,"category":743,"tags":4783},[4781],"Clement Ho","\n\nThis blog post was originally published on the GitLab Unfiltered blog. 
It was reviewed and republished on 2020-09-02.\n{: .alert .alert-info .note}\n\nOne of the challenges in a rapidly growing engineering organization is determining how your organization's productivity scales over time. Companies that grow quickly often face a slow down in output because of inefficiencies and communication challenges. For example, a task that you used to be able to ask another coworker to do may now need a comprehensive approval flow.\n\nAt GitLab, we went from 100 to 280 engineers in 1.5 years. As a startup, it was critical that we continued our momentum of:\n\n![Shipping monthly releases => Provide more value to users => Increasing revenue](https://about.gitlab.com/images/blogimages/measuring-engineering-productivity/momentum.png){: .shadow.center}\n\nAs a result, we created several [Key Performance Indicators](/company/kpis/#what-are-kpis) (KPIs) and Performance Indicators (PIs) around this:\n\n- [Throughput](/handbook/engineering/development/performance-indicators/#throughput)\n- [Product MRs Review to Merge time (RTMT)](/handbook/engineering/development/performance-indicators/#review-to-merge-time-rtmt)\n- [Development Department Member MR Rate](/handbook/engineering/development/performance-indicators/#development-department-member-mr-rate)\n- [Say Do Ratio](/handbook/engineering/development/performance-indicators/#say-do-ratios)\n- [Product MRs by Type](/handbook/engineering/development/performance-indicators/#product-mrs-by-type)\n\nThe primary one that is often discussed in engineering leadership at GitLab is [Merge Request (MR)](/features/continuous-integration/) Rate.\n\nIn this blog post, I'll take a deep dive into how we measure engineering productivity at GitLab using MR Rate, the challenges we've encountered, and what we do to increase this metric. 
I hope that through this, you'll have a deeper understanding of how we operate at GitLab and inspire you to reflect on how your organization measures engineering productivity.\n\n## What is MR Rate?\n\n![MR Rate = (Total MRs for a team in a given month)/(number of team members employed during that month)](https://about.gitlab.com/images/blogimages/measuring-engineering-productivity/mr-rate-formula.jpeg){: .shadow.center}\n\n**Note:** We include management roles in the team count because we want this metric to be a team metric and want managers to be accountable for their team's metric.\n\nWe use this metric because:\n\n1. We want to incentivize everyone to [iterate](https://handbook.gitlab.com/handbook/values/#iteration) and break down work into smaller MRs because smaller MRs have a faster review time and get merged faster (better developer and maintainer review experience)\n1. The quicker we can deliver features to users, the faster we can iterate upon them\n1. Every MR into the codebase improves the codebase, and every improvement has the downstream effect of making the product better\n\nWhen viewed at an organization level, this metric helps us understand how productivity in the organization changes over time. Although this metric seems simple, it actually requires a lot of detailed analysis as there are many situations to examine:\n\n- New team vs. established team\n- Team performance issues (blocking work or incorrect iteration work breakdown)\n- Individual growth (and performance management)\n- [Community contributions](/handbook/marketing/developer-relations/contributor-success/) vs. independent team contributions\n- Operational productivity constraints\n\nAt first, we measured MRs based on labels associated with the product domain (which generally maps to an existing engineering team). As an open core company, this allowed us to easily aggregate community contributions into the metric. 
We wanted to account for them because we want to continue encouraging team members to support community contribution MRs and recognize that these MRs continue to help provide the product with more value to users.\n\nUnfortunately, as our organization grew over time, this metric became confusing. Although we had a bot that would label MRs, we occasionally had bad data and mislabeled MRs. In addition, certain teams with product areas that were more mature had more community contributions than others. The combination of these issues evolved the metric into multiple types.\n\n- MR Rate measured through labeling\n- Team MR Rate measured through MR authorship (also known as Narrow MR Rate)\n\nIt's likely that over time this may continue to evolve but for now, these new types of MR Rates have brought more clarity within our organization.\n\n## What are the challenges with MR Rate?\n\nThere are many challenges, but we'll highlight a few notable ones.\n\nFirst of all, one metric never tells the full story. One of the challenges we faced as we hyper focused on this metric was being biased to the number given by the metric rather than truly understanding the story surrounding the metric. For example, a team with a high MR Rate could be shipping quantity over quality. By the MR Rate measurement alone, the organization could unintentionally exemplify teams with unstable features.\n\nIn order to avoid these types of situations, we first ensure that we clearly define our [Definition of Done](https://docs.gitlab.com/ee/development/contributing/merge_request_workflow.html#definition-of-done) and our [maintainer](/handbook/engineering/workflow/code-review/#maintainer) [review process](https://docs.gitlab.com/ee/development/code_review.html). 
This allows us to set a baseline for quality so that we can set clear expectations in the organization and create clear guidance when MRs are below our standards for quality.\n\nIn addition, we also use other metrics to get a fuller understanding of the story and we regularly introspect about our numbers. We intentionally accompany MR Rate with a few other metrics such as [Product MRs by Type](/handbook/engineering/development/performance-indicators/#product-mrs-by-type) to better understand the distribution of MRs and [Say Do Ratio](/handbook/engineering/development/performance-indicators/#say-do-ratios) (this is our latest addition, we're still iterating on it) to better understand how the teams are performing relative to what they committed with product management during the development milestone. We generally use MR Rate to observe trends and regularly ask ourselves, “why is this trending down?” as well as “why is this trending up so much? Is there something that this team is doing that other teams can learn from?”. These are some techniques we use to keep ourselves accountable for understanding the broader picture of the metric.\n\nAnother challenge we faced with MR Rate is balancing it between a team vs. individual metric. As an organization, we want MR Rate to trend upwards over time, and we want to hold engineering leaders accountable for their teams. Engineering directors are responsible for their (organization) sub-department's metrics, and engineering managers are responsible for their team's metrics respectively.\n\nWe intentionally chose not to make MR Rate an individual metric because we do not want to encourage siloed, non-collaborative behavior. For example, we do not want a team member to feel disincentivized to review other team members' MRs or unblock others. This is especially important because [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) is a company value. 
Although actions such as making an MR Rate leaderboard could potentially increase the metric for the organization, we have intentionally chosen not to do that because we want to encourage collaboration. We also chose not to use MR Rate as a metric for a team member's underperformance.\n\nThis conscious decision is tricky (especially for smaller teams) because it can be rather difficult for engineering managers to increase their team's MR Rate trends without discussing individual metrics. When teams have less team members, each team member's total MRs in a month would be more impactful to the team's overall MR Rate compared to a larger team. Different teams have attempted to address this in different ways which we will explain in the next section.\n\n## How do we increase MR Rate?\n\nWe use four primary strategies to increase MR Rate.\n\n1. Improving iteration\n1. Setting KPIs\n1. Setting goals (OKRs) to increase KPI\n1. Empowering teams to improve efficiencies\n\nImproving iteration is our primary strategy because team members who are better at iterating are able to create smaller MRs, which results in a higher MR Rate. In our experience, iteration is easy to conceptualize but difficult to apply. Our organization put together some resources (including a [training template](https://gitlab.com/gitlab-com/Product/-/blob/master/.gitlab/issue_templates/iteration-training.md)), and our CEO has set up Iteration Office Hours as an opportunity to coach (most of which are also available publicly on [YouTube](https://www.youtube.com/c/GitLabUnfiltered/search?query=iteration+office+hours+with)).\n\nFrom an organizational perspective, we use KPIs to monitor our MR Rate. Our organization tracks our [Development Department Narrow MR Rate](/handbook/engineering/development/performance-indicators/#development-department-narrow-mr-rate) as our primary KPI with a description, a chart with current and historical data, and a predefined target. 
As of writing this article, our target is 10, and we are trending toward that target over time.\n\n![Development Department Narrow MR Rate](https://about.gitlab.com/images/blogimages/measuring-engineering-productivity/dept-mr-rate.png){: .shadow.center}\n\n_KPI chart as of August 24, 2020_\n\nEach sub-department under the development department also has their dashboards available publicly (though these dashboards are not as organized and easy to find as the KPI). For example, the Ops sub-department tracks this on their specific [handbook page](/handbook/engineering/development/ops/#ops-sub-department-performance-indicators). We are currently working on consolidating these charts. These KPI dashboards make it easy to understand how the organization is performing and allow us to keep it top of mind.\n\nIn addition to KPIs, each fiscal quarter, engineering management uses these indicators to determine how to set OKRs. In previous quarters, OKRs were set to raise MR Rate to higher targets. This quarter's goal, in light of COVID's long lasting implications, is to maintain the target, because we understand that the current situation is affecting everyone differently. OKRs help align the organization toward the same goals so that everyone understands and can contribute to these goals.\n\nFrom a team perspective, we also empower our engineering managers to experiment with processes to improve efficiency but stay mindful of maintaining healthy work life balance. Some engineering managers choose to use individual MR Rate values as a means of coaching and understanding more about each team member's merge requests. For example, a team member may have a lower MR Rate because he/she is a maintainer, and because of the number of MR reviews received, is unable to have completed as many MRs as he/she could do. 
Some teams also look through their team's MR Rate on a weekly basis and provide commentary to their directors as a means of understanding more about the metric in order to improve it over time.\n\n## Recap\n\nThe MR Rate is how we've chosen to measure and increase engineering productivity at GitLab. It's not perfect, but we're constantly iterating to make it better. We have yet to determine what our ceiling is or whether we've already reached it but we will definitely share with the wider community when we get to that point. What metrics do you use to measure your organization's engineering productivity? Do you have suggestions or comments about MR Rate? Leave a comment below, and we'll read through them and do our best to respond.\n\n# Special thanks\n\nThanks to the following engineering leaders at GitLab who opened up their calendars to share their insights on this topic:\n\n- [Eric Johnson](/company/team/#edjdev), executive vice president of Engineering\n- [Christopher Lefelhocz](/company/team/#clefelhocz1), vice president of Development\n- [Wayne Haber](/company/team/#whaber), director of Engineering, Threat Management\n- [Sam Goldstein](/company/team/#sgoldstein), director of Engineering, Op\n- [Tim Zallmann](/company/team/#timzallmann), director of Engineering, Dev\n- [Chun Du](/company/team/#cdu1), director of Engineering, Enablement\n- [Bartek Marnane](/company/team/#bmarnane), director of Engineering, Growth\n- [Todd Stadelhofer](/company/team/#tstadelhofer), director of Engineering, Secure\n- [Darby Frey](/company/team/#darbyfrey), senior manager, Engineering, Verify\n- [Daniel Croft](/company/team/#dcroft), senior manager, Engineering, Package and Release\n\nCover image by [Frank Mckenna](https://unsplash.com/@frankiefoto) on [Unsplash](https://unsplash.com/photos/4V8JxijgZ_c)\n{: 
.note}\n",[9],{"slug":4785,"featured":6,"template":680},"measuring-engineering-productivity-at-gitlab","content:en-us:blog:measuring-engineering-productivity-at-gitlab.yml","Measuring Engineering Productivity At Gitlab","en-us/blog/measuring-engineering-productivity-at-gitlab.yml","en-us/blog/measuring-engineering-productivity-at-gitlab",{"_path":4791,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4792,"content":4798,"config":4803,"_id":4805,"_type":14,"title":4806,"_source":16,"_file":4807,"_stem":4808,"_extension":19},"/en-us/blog/meltano-follow-up",{"title":4793,"description":4794,"ogTitle":4793,"ogDescription":4794,"noIndex":6,"ogImage":4795,"ogUrl":4796,"ogSiteName":667,"ogType":668,"canonicalUrls":4796,"schema":4797},"Thanks for all the feedback and interest in Meltano!","Last week we introduced Meltano, and we're so excited to be building our community and working with you on our MVP.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678909/Blog/Hero%20Images/thanks-for-all-the-feedback-and-interest-in-meltano.jpg","https://about.gitlab.com/blog/meltano-follow-up","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Thanks for all the feedback and interest in Meltano!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2018-08-07\",\n      }",{"title":4793,"description":4794,"authors":4799,"heroImage":4795,"date":4800,"body":4801,"category":299,"tags":4802},[3134],"2018-08-07","\nWe recently wrote a [post introducing Meltano](/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you/), an open source tool that will help data teams version control everything from raw data to visualization. 
We were blown away by the level of interest it received, including all sorts of comments on [Hacker News](https://news.ycombinator.com/item?id=17667399) that gave us a ton of feedback we’re excited to wrestle with and work towards. Special thanks to commenter [slap_shot](https://news.ycombinator.com/item?id=17668089), whose comments prompted us to hop on YouTube for a live conversation. We learned that in real life, slap_shot is a data and analytics engineer and founder named Brett, and you can watch our live chat here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/F8tEDq3K_pE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nBrett told us something we suspected after our own experience of assembling our analytics stack – that pretty much every data team he encounters is using a \"multitude of internal processes that are broken and cobbled together for data integration, or they're not comfortable with the pricing and sales process for some of these products.\"\n\nWhen we started researching tools for our team, the goal was to use only open source. Unfortunately, the best open source that we could find wasn't up to the task for us, and changing the code proved cumbersome due to licensing issues. We settled on Looker, a fantastic (but proprietary) solution for visualization, and began reluctantly building out other parts ourselves. Brett told us the idea of an open source version of Looker could be really promising – it's too expensive for many teams, including, to some extent, our own. We think it doesn't make sense to build a dashboard and not be able to share it with the whole team.\n\nSid shares, \"We spent months assembling our data pipeline... but all these choices were so hard, and I think there's room for a convention over configuration framework, where you type in your Salesforce API keys and you get the proper Salesforce graphs. 
We want to get as close as possible to that experience.\"\n\n### Issues and next steps\n\n*  The Meltano team is building a set of very common core extractors, including Salesforce, Marketo, Zendesk, etc. This way we can hopefully provide a few of the most important sources out of the box, and substantial initial value. Then, being an open source product, we hope others can contribute and increase the breadth of support.\n*  The data team is going to try to apply Meltano to a machine learning project, probably around predicting probability of winning a sales opportunity, so we can incorporate any requirements specific to ML.\n\n### Q&A\n\n#### Give me the short and sweet version – what does Meltano do?\n\n[Meltano helps](https://gitlab.com/meltano/meltano/#roadmap) companies consolidate, organize, and analyze their data to make better business decisions.\n\n#### Can the BI tool and integration library be used outside of GitLab?\n\nWe're not sure yet. For now, the integration part (which we call orchestration) is GitLab CI-based. We recently had the idea to have a frontend \"production mode,\" where you can at least see everything, and maybe we'll have a \"development mode\" where you can run different pipelines inside a Python Flask app.\n\n#### Embulk and Singer built the core foundation and they allow people to build their own integrations, do we envision similar model?\n\nYes. Right now we are prioritizing getting the architecture and tooling correct, to make it easy for us and others to build additional extractors.\n\n#### What's the vision for the monorepo and what are the benefits?\n\nWe consolidated all of the code for Meltano in a single project, to make it easier to develop and contribute to. We then provide two Meltano Docker images, similar to Jupyter notebook layering: a standard image which contains all of the default extractors and loaders, as well as a base image so users can customize it to contain only what they need. 
`meltano/analytics` is both a prototypical Meltano implementation and the repo for GitLab Analytics.\n\n#### Would I *have* to use Meltano for everything?\n\nNo! We know teams have different needs and preferences, so you would be able to pick and choose the features that you use.\n\n#### I'd like to see GitLab CI have a clean API for others to plug into. Do you see that happening?\n\nThe Data team is committed to using GitLab CI as our orchestration platform. [Airflow](https://airflow.apache.org/) is state of the art right now, but we think we can have similar or better features within CI. If appropriate, the Meltano team will contribute back to CI to make it better too. Some features we're excited about would be better statistics across jobs, sub-pipelines and directed acyclic graphs of jobs, and intelligent data backfill support.\n\n#### This sounds really ambitious, and there are a lot of companies in the data integration space.\n\nYou're completely right! But there isn't an open source tool that checks all these boxes. It might sound a bit ludicrous, but as Sid says, \"When I saw GitLab for the first time, it made sense that something you collaborate on is also something you contribute to... 
it makes sense to me that it's not an individual burden, it's a shared burden.\" We think that the shared nature of the problem will make for a great open source community, and without that community, this won't really get off the ground.\n\nPhoto by [Ludovic Toinel](https://unsplash.com/photos/nGwyaWKFRVI) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,745],{"slug":4804,"featured":6,"template":680},"meltano-follow-up","content:en-us:blog:meltano-follow-up.yml","Meltano Follow Up","en-us/blog/meltano-follow-up.yml","en-us/blog/meltano-follow-up",{"_path":4810,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4811,"content":4817,"config":4821,"_id":4823,"_type":14,"title":4824,"_source":16,"_file":4825,"_stem":4826,"_extension":19},"/en-us/blog/meltano-functional-group-update-post",{"title":4812,"description":4813,"ogTitle":4812,"ogDescription":4813,"noIndex":6,"ogImage":4814,"ogUrl":4815,"ogSiteName":667,"ogType":668,"canonicalUrls":4815,"schema":4816},"New Meltano personas, priorities, and updates from the team","There's a lot going on — here are some of the highlights on user research, dogfooding Meltano, embedding engineers, and hiring!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678847/Blog/Hero%20Images/meltano-fgu.jpg","https://about.gitlab.com/blog/meltano-functional-group-update-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New Meltano personas, priorities, and updates from the team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2018-10-08\",\n      }",{"title":4812,"description":4813,"authors":4818,"heroImage":4814,"date":1976,"body":4819,"category":743,"tags":4820},[3134],"\nJacob Schatz here, Staff Engineer for [Meltano](https://gitlab.com/meltano)! We've been heads down working on improving Meltano, and figured it was time for an update. 
We've had some great conversations that have helped us identify two general personas. Our team is also growing, and we're ready for frontend contributions, but more on that later.\n\nWe've been conducting interviews to zero in on what our users will want, what they're currently doing, and what tools they're using. Over the course of those conversations, we saw two main scenarios emerge. People either wanted a command line interface (CLI) or a graphical user interface (GUI). The GUIs that exist are painful to use, and not very intuitive. In both scenarios, people we spoke with are frustrated. This goes back to the original reason [we decided to create Meltano](/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you/) — our data team members were relying on frustrating and expensive toolsets with poor UIs.\n\n### What are the Meltano personas?\n\nOur conversations revealed two general types of users:\n* Users who have engineers on staff\n* Users who do not have engineers on staff, or their engineers do not have bandwidth to help them\n\nThe Data team at GitLab, for example, has data engineers on staff who are willing, able, and happy to write Python. We won't be able to write every extractor and loader, so our users can follow our [specifications](https://gitlab.com/meltano/specifications), which are based off of the [Singer specifications](https://github.com/singer-io/getting-started). We want to make that as easy as possible, so Meltano can be the glue between all these different pieces.\n\nFor the other teams who don’t have the technical resources, we want to make it as if they had engineers on staff. Ideally, they'll just need to click a couple of buttons, run extract, load and transform with the extractors and loaders that we already have. 
Hopefully in the future the community can contribute more to these types of different extractors and loaders.\n\nYou can check out our updated [readme](https://gitlab.com/meltano/meltano/blob/master/README.md) with more info about Meltano and our personas. We're working iteratively, so if you have a different setup or scenario to share, we want to hear from you about your experience! Get in touch with us and tell us about your struggles or successes with your data team.\n\n### What’s next?\n\nWe're focused on our own CLI and GUI, and continuing to build more extractors and loaders (or [\"taps and targets\"](https://www.singer.io/)). We will be the glue that ties everything together. While current Singer taps and targets support extracting and loading, we'll be supporting much more, like removal of PII. Our CLI will support all of this from one configuration. We also want the CLI to have a really nice user experience, so I'm working with GitLab UX to help make it happen.\n\nAs always, we’re looking for contributors! In the [Dashboard project](https://gitlab.com/meltano/dashboard) you’ll see the Chart.js library that I’m building to make really nice dashboards for Meltano. Although we've had a ton of great Python contributions, we haven’t had as many contributors to the frontend, so we’d love your help there.\n\n### In other news\nThere's a lot going on, here are some of the highlights!\n\n#### Dogfooding\nIn my experience, unless one experiences the direct results of the code they write, and feel the pain their users feel when they hit a bug, one might not correctly solve the problem. Currently, we fulfill the data team's requests, but if something doesn't work they merely report back to us, without us experiencing the pain ourselves. We're changing how we work in order to imprint the idea that if something is broken, it's the Meltano team's responsibility. 
We’re all investigating every single pipeline failure, regardless of whose “fault” it is, because these suggest that it may be a poor user experience.\n\n#### Embedded engineers\nIn order to dogfood better, we've taken a data engineer from the data team, and an engineer from the Meltano team. They split their work 50/50 so each does half of their usual work and half of each other's work. It's already made a huge difference by giving us more eyes and ears on lots of issues, and allowing the engineers to approach problems from a different angle. Another added benefit is that every Meltano engineer gets direct exposure and experience from the data team, to make them better data scientists as well product engineers.\n\nThat's all for now, get in touch with us in our [issue tracker](https://gitlab.com/groups/meltano/-/boards), and tweet us [@meltanodata](https://twitter.com/meltanodata)!\n\nCover [image](https://unsplash.com/photos/2FPjlAyMQTA) by [John Schnobrich](https://unsplash.com/@johnschno) on Unsplash\n{: .note}\n\n[Emily von Hoffmann](https://about.gitlab.com/company/team/#emvonhoffmann) contributed to this post.\n{: .note}\n",[993,677,1297,9,723],{"slug":4822,"featured":6,"template":680},"meltano-functional-group-update-post","content:en-us:blog:meltano-functional-group-update-post.yml","Meltano Functional Group Update Post","en-us/blog/meltano-functional-group-update-post.yml","en-us/blog/meltano-functional-group-update-post",{"_path":4828,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4829,"content":4835,"config":4840,"_id":4842,"_type":14,"title":4843,"_source":16,"_file":4844,"_stem":4845,"_extension":19},"/en-us/blog/merge-trains-explained",{"title":4830,"description":4831,"ogTitle":4830,"ogDescription":4831,"noIndex":6,"ogImage":4832,"ogUrl":4833,"ogSiteName":667,"ogType":668,"canonicalUrls":4833,"schema":4834},"How to use merge train pipelines with GitLab","Read here an introduction on what merge trains are, how to use them and how to incorporate 
them to your GitLab project.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667210/Blog/Hero%20Images/merge-train-explained-banner.jpg","https://about.gitlab.com/blog/merge-trains-explained","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use merge train pipelines with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2020-12-14\",\n      }",{"title":4830,"description":4831,"authors":4836,"heroImage":4832,"date":4837,"body":4838,"category":743,"tags":4839},[1755],"2020-12-14","This blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-01-20.\n{: .alert .alert-info .note}\n\n[Merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) is a powerful GitLab feature that empowers users to harness the potential of [pipelines for merge results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) to the fullest and also automatically merge a series of (queued) merge requests (MRs) without breaking the target branch. However, due to the structural complexity of the concept, users are often unable to use it effectively for their projects and play it safe by restricting their usage to MRs that pose minimum or no conflict with the target branch.\n\nAs a [senior product designer for Continuous Integration (CI)](/company/team/#veethikaa), I often deconstruct certain concepts and logic for features related to CI so that I have a strong foundation of understanding when making design proposals. Recently, I had a chance to hold a discussion around a very interesting feature - merge trains — with the team. 
This post unpacks the concept of merge trains by explaining the difference between merge trains, pipelines for MRs, and pipelines for merge results.\n\n## Pipelines for merge requests\n\nGenerally, when a new merge request is created, a pipeline runs to check if the new changes are eligible to be merged to the target branch. This is called the pipeline for merge requests (MRs). A good practice is to only keep the necessary jobs for validating the changes at this step, so the pipeline doesn’t take a long time to complete and CI minutes are not overused. GitLab allows users to [configure the pipeline for MRs](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) by adding `rules:if: $CI_MERGE_REQUEST_IID` to the jobs they wish to run for MRs.\n\n![Pipeline for merge request](https://about.gitlab.com/images/blogimages/merge-train-explained-pipeline-for-merge-requests.jpg)\n\n### Pipelines for merge results\n\nMerge request pipelines verify the branch in isolation. The target branch may change several times during the lifetime of the MR, and these changes are not taken into consideration. In the time during which the pipeline for the MR runs (and succeeds), if the target branch progresses in the background and a user merges the changes to the target branch, they might eventually end up with a broken target.\n\nWhen a [pipeline for merge results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) runs, GitLab CI performs a _pretend_ merge against the updated target branch by creating a commit on an internal ref from the source branch, and then runs a pipeline against it. This pipeline validates the result prior to merging, therefore increasing the chances of keeping the target branch green.\n\n![Pipeline for merge results](https://about.gitlab.com/images/blogimages/merge-train-explained-pipeline-for-merge-results.jpg)\n\nWe should keep in mind that this pipeline does not run automatically with every update to the target branch. 
To learn more about this feature in detail and understand the process of enabling it in your GitLab instance, you can refer to the [official documentation on merge results](https://docs.gitlab.com/ee/ci/pipelines/merged_results_pipelines.html).\n\nHowever, if a long time has passed since the last successful pipeline ran, by the time the MR is ready to be merged, the target branch may have already changed and advanced. If we go ahead and merge your MR without re-running the pipeline for MRs, we could end up with a broken target branch. Merge trains can prevent this from happening.\n\n### About merge trains\n\nPipeline for merge results is an extremely useful feature in itself, but tracking the right slot to merge the feature branch into the target and remembering to run the pipeline manually before doing so is a lot to expect from a developer buried in tasks that involve deep logical thinking.\n\nTo tackle this complexity in workflow, GitLab introduced [the merge trains feature](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) in [GitLab Premium 12.0](/releases/2019/06/22/gitlab-12-0-released/#sequential-merge-trains). 
Merge trains allow users to capitalize on the capabilities of pipelines for merge results to automate the process of merging to the target branch with minimum chances of breaking it.\n\nWith merge trains enabled, a merge request can be added to the train, which takes care of it until merged.\nA merge train can be imagined as a queue of MRs that is automatically managed for you.\n\n#### How do merge trains work?\n\nWhen users queue up their MRs in a merge train, GitLab performs a pretend merge for each source branch on top of the previous branch in the queue, where the first branch on the train is merged against the target branch.\nBy creating a temporary commit for each of these merges, GitLab can run merged result pipelines.\nThe first MR in the queue, after having a successful pipeline run for MRs, gets merged to the target branch.\n\nEvery time a merge request is merged into the target branch, the pipelines for the newly added MRs in the train would run against the target branch and the newly added changes from the recently merged MR and changes that are from MRs already in the train.\n\n![Pipeline for merge results](https://about.gitlab.com/images/blogimages/merge-train-explained-working.gif)\n\nMerge trains carry an immense possibility for innovation with GitLab as a toolchain. But to be able to build upon the concept, it is imperative to have a holistic understanding of the same at the system level.\n\nHopefully, this post does the job of breaking down the concept into layman's terms, thereby opening doors for future collaboration within [stage groups](/handbook/product/categories/) at GitLab.\n\nHave suggestions around improving merge trains? 
please leave your thoughts on this [epic](https://gitlab.com/groups/gitlab-org/-/epics/5122).\n",[1090,1293,677,1440,9],{"slug":4841,"featured":6,"template":680},"merge-trains-explained","content:en-us:blog:merge-trains-explained.yml","Merge Trains Explained","en-us/blog/merge-trains-explained.yml","en-us/blog/merge-trains-explained",{"_path":4847,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4848,"content":4853,"config":4859,"_id":4861,"_type":14,"title":4862,"_source":16,"_file":4863,"_stem":4864,"_extension":19},"/en-us/blog/merging-ce-and-ee-codebases",{"title":4849,"description":4850,"ogTitle":4849,"ogDescription":4850,"noIndex":6,"ogImage":735,"ogUrl":4851,"ogSiteName":667,"ogType":668,"canonicalUrls":4851,"schema":4852},"GitLab might move to a single Rails codebase","We're considering moving towards a single Rails repository by combining the two existing repositories – here's why, and what would change.","https://about.gitlab.com/blog/merging-ce-and-ee-codebases","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab might move to a single Rails codebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marin Jankovski\"}],\n        \"datePublished\": \"2019-02-21\",\n      }",{"title":4849,"description":4850,"authors":4854,"heroImage":735,"date":4856,"body":4857,"category":743,"tags":4858},[4855],"Marin Jankovski","2019-02-21","\n\n## A single repository with no license changes\n\nBefore we go into the details of the proposed changes, we want to stress that:\n\n* GitLab Community Edition code would remain open source and MIT licensed.\n* GitLab Enterprise Edition code would remain source available and proprietary.\n\n## What are the challenges with having two repositories?\n\nCurrently the Ruby on Rails code of GitLab (the majority of the codebase) are maintained in two repositories.\nThe [gitlab-ce] repository for the code with an open source license and 
the [gitlab-ee] repository containing code with a proprietary license which is source available.\n\nFeature development is difficult and error prone when making any change at GitLab in two similar yet separate repositories that depend on one another.\n\nBelow are a few examples to demonstrate the problem:\n\n### Duplicated work during feature development\n\nThis [frontend only Merge Request](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/7376) required a [backport to CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/22158). Backporting included creating duplicate work to avoid future conflicts as well as changes to the code to support the feature.\n\n### A simple change can break master\n\nA simple [change in a spec in CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24801)\nfailed the [pipeline in the master branch](https://gitlab.com/gitlab-org/gitlab-ee/issues/9621). After hours of investigation, an [MR reverting the change](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24961) was created, as well as a [second to address the problem](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24983).\n\n### Conflicts during preparation for regular releases\n\n This concerns preparation for a regular release, e.g. [11.7.5 release](https://gitlab.com/gitlab-org/release/tasks/issues/659). Merge requests preparing the release for both the [CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24941) and [EE repository](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/9441) need to be created and once the pipelines pass, the EE repository requires a merge from the CE repository. This causes additional conflicts, pipeline failures, and similar delays requiring more manual intervention during which the CE distribution release is also delayed.\n\nBetween these three examples, _days_ of engineering time has been spent on [busy work], delaying the delivery of work that brings actual value. 
Only three examples are highlighted, but this type of work occurs daily.\nWhether writing a new feature available in Core, or any of the enterprise plans, all are equally affected.\n\nMore details on the workflows and challenges can be found in the [working in CE and EE codebases blueprint] document.\n\n## What have we done to improve the situation?\n\nWe've invested significant development time to try and keep the two repositories separate:\n\n### Pre-2016: Manual merges for each release\n\n Prior to 2016, merging the CE repository into the EE repository was done when we were ready to cut a release; the number of commits was small so this could be done by one person.\n\n### 2016-2017: Daily merges by a team of developers\n\nIn 2016, the number of commits between the two repositories grew so the task was divided between seven (brave) developers responsible for merging the code once a day. This worked for a while until delays started happening due to failed specs or difficult merge conflicts.\n\n### 2017-2018: Automated merges every three hours\n\nAt the end of 2017, we merged an [MR that allowed the creation of automated MRs between the two repositories](https://gitlab.com/gitlab-org/release-tools/merge_requests/86), mentioning individuals to resolve conflicts. This task ran every three hours, allowing for a smaller number of commits to be worked on. You can read more about our [automated CE to EE merge here](/blog/using-gitlab-ci-to-build-gitlab-faster/).\n\n### Present: Further automation with Merge Train\n\nBy the end of 2018, the number of changes going into both the CE and EE repositories grew to thousands of commits in some cases, which made the automated MR insufficient. The [Merge Train](https://gitlab.com/gitlab-org/merge-train) tool was created to automate these workflows further, by automatically rejecting merge conflicts and preferring changes from one repository over the other. 
The edge cases we've encountered require us to invest additional time in improving the custom tool.
The proposed change would pertain only to the Ruby on Rails repository, and I've summarized it below.\n\n### So, what changes?\n\n* The [gitlab-ce] and [gitlab-ee] repositories are replaced with a single [gitlab] repository, with all open issues and merge requests moved into the single repository.\n* All frontend assets (JavaScript, CSS, images, views) will be open sourced under the MIT license.\n* All proprietary backend code is located in the `/ee` repository.\n* All documentation is merged together and clearly states which features belong to which [feature set]. Documentation is [already licensed under CC-BY-SA](https://gitlab.com/gitlab-org/gitlab-ce/issues/42891).\n\n### What remains unchanged?\n\n* The [gitlab-ce distribution] package remains fully open source under the same license.\n* All code outside of the `/ee` directory in the single [gitlab] repository is open source.\n* All code in the `/ee` directory remains proprietary with source code available.\n* Other projects, such as [gitlab-shell], [gitaly], [gitlab-workhorse], [gitlab-pages], remain unchanged.\n\n### What are the possible downsides?\n\nWe want to be clear about the possible downsides of this approach:\n\n* Users with installations from source currently cloning the [gitlab-ce] repository would download from a new repository named [gitlab]. The clone will also fetch the proprietary code in `/ee` directory, but removing this directory has no effect on running application.\n\n     ➡️ This is resolved by removing the `/ee` directory after cloning.\n* [gitlab-ce distribution] users would get more database tables because of the new tables in `db/schema.rb`. 
Database schema is open source and in the [gitlab-ce distribution] these new tables would not be populated, affect performance, or take significant space.\n\n     ➡️ All database migration code is open source and does not add additional maintenance burden, so no additional work is required.\n\n## What's next?\n\nWe currently think that the efficiency gains and clearer naming outweighs these disadvantages. Our [stewardship of GitLab](/company/stewardship/) is an important aspect of GitLab's success as a whole, so we would love to know:\n\n* Is there a better way to accomplish to solve the problem of the [busy work]?\n* What improvements can we make to our proposal?\n* Are there any additional considerations that we should take into account?\n\nWe invite you to share your suggestions in [issue 2952](https://gitlab.com/gitlab-org/gitlab-ee/issues/2952), which was an inspiration for the proposal as it currently stands. We look forward to hearing your thoughts!\n\nCover image from [Unsplash](https://images.unsplash.com/photo-1512217536414-d92543c79ca1)\n{: .note}\n\n[values]: https://handbook.gitlab.com/handbook/values/\n[gitlab-ce]: https://gitlab.com/gitlab-org/gitlab-ce\n[gitlab-ce distribution]: https://packages.gitlab.com/gitlab/gitlab-ce\n[gitlab-ee distribution]: https://packages.gitlab.com/gitlab/gitlab-ee\n[gitlab-ee]: https://gitlab.com/gitlab-org/gitlab-ee\n[gitlab]: https://gitlab.com/gitlab-org/gitlab\n[gitlab-shell]: https://gitlab.com/gitlab-org/gitlab-shell\n[gitaly]: https://gitlab.com/gitlab-org/gitaly\n[gitlab-workhorse]: https://gitlab.com/gitlab-org/gitlab-workhorse\n[gitlab-pages]: https://gitlab.com/gitlab-org/gitlab-pages\n[feature set]: /pricing/feature-comparison/\n[busy work]: https://en.wikipedia.org/wiki/Busy_work\n[working in CE and EE codebases blueprint]: https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/ce-ee-codebases\n[design for merging two codebases]: 
https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/merge-ce-ee-codebases\n",[9,267,277,745],{"slug":4860,"featured":6,"template":680},"merging-ce-and-ee-codebases","content:en-us:blog:merging-ce-and-ee-codebases.yml","Merging Ce And Ee Codebases","en-us/blog/merging-ce-and-ee-codebases.yml","en-us/blog/merging-ce-and-ee-codebases",{"_path":4866,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4867,"content":4873,"config":4879,"_id":4881,"_type":14,"title":4882,"_source":16,"_file":4883,"_stem":4884,"_extension":19},"/en-us/blog/migrating-to-puma-on-gitlab",{"title":4868,"description":4869,"ogTitle":4868,"ogDescription":4869,"noIndex":6,"ogImage":4870,"ogUrl":4871,"ogSiteName":667,"ogType":668,"canonicalUrls":4871,"schema":4872},"How we migrated application servers from Unicorn to Puma","It's been a long journey but with the release of GitLab 13.0 Puma is our default application server. Here's what we did and learned along the way.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681413/Blog/Hero%20Images/appserverpuma.jpg","https://about.gitlab.com/blog/migrating-to-puma-on-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we migrated application servers from Unicorn to Puma\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Craig Gomes\"}],\n        \"datePublished\": \"2020-07-08\",\n      }",{"title":4868,"description":4869,"authors":4874,"heroImage":4870,"date":4876,"body":4877,"category":743,"tags":4878},[4875],"Craig Gomes","2020-07-08","\n\nIt’s been years in the making, but our journey to migrate our application servers from Unicorn to Puma is complete. With the Gitlab 12.9 release Puma was running on GitLab.com and now with 13.0 it is the default application server for everyone. 
This is the story about how we migrated from Unicorn to Puma and the results we’ve seen.\n\n## A starting point\n\nBoth [Unicorn](https://yhbt.net/unicorn/) and [Puma](https://puma.io) are web servers for Ruby on Rails. The big difference is that Unicorn is a single-threaded process model and Puma uses a multithreaded model. \n\nUnicorn has a multi-process, single-threaded architecture to make better use of available CPU cores (processes can run on different cores) and to have stronger fault tolerance (most failures stay isolated in only one process and cannot take down GitLab entirely). On startup, the Unicorn ‘main’ process loads a clean Ruby environment with the GitLab application code, and then spawns ‘workers’ which inherit this clean initial environment. The ‘main’ never handles any requests; that is left to the workers. The operating system network stack queues incoming requests and distributes them among the workers.\n\nUnlike Unicorn, Puma can run multiple threads for each worker. Puma can be tuned to run multiple threads and workers to make optimal use of your server and workload. For example, in Puma defining \"N workers\" with 1 thread is essentially equivalent to \"N Unicorn workers.\" In multi-threaded processes thread safety is critical to ensure proper functionality. We encountered one thread safety issue while migrating to Puma and we'll get to that shortly.\n\n### Technical Descriptions\n\nUnicorn is an HTTP server for Rack applications designed to only serve fast clients on low-latency, high-bandwidth connections and take advantage of features in Unix/Unix-like kernels. Slow clients should only be served by placing a reverse proxy capable of fully buffering both the the request and response in between unicorn and slow clients.\n\nPuma is a multi-threaded web server and our replacement for Unicorn. Unlike other Ruby Webservers, Puma was built for speed and parallelism. 
Puma is a small library that provides a very fast and concurrent HTTP 1.1 server for Ruby web applications. It is designed for running Rack apps only.\n\nWhat makes Puma so fast is the careful use of a Ragel extension to provide fast, accurate HTTP 1.1 protocol parsing. This makes the server scream without too many portability issues.\n\n## Why Puma?\n\nWe began early investigations into Puma believing it would help resolve some of our [memory growth issues](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/3700) and also to help with scalability. By switching from Unicorn's single threaded process we could cut down on the number of processes running and the memory overhead of each of these processes. Ruby processes take up a significant amount of memory.  Threads, on the other hand, consume a much smaller amount of memory than workers because they are able to share a significantly larger portion of application memory.  When I/O causes a thread to pause, another thread can continue with its application request. In this way, multi-thread makes the best use of the available memory and CPU, reducing memory consumption by [approximately 40%](/releases/2020/05/22/gitlab-13-0-released/#reduced-memory-consumption-of-gitlab-with-puma).\n\n## The early appearance of Puma\n\nThe first appearance of Puma in a GitLab issue was in a discussion about using [multithreaded application servers](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/3592), dating back to November 20, 2015. In our spirit of iteration, the first attempt at adding experimental support for Puma followed shortly after with a [merge request](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/1899) on November 25, 2015. The initial [results](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/3592#note_2805965) indicated a lack of stability and thus did not merit us moving forward with Puma at the time. 
While the push [to improve our memory footprint](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/25421) continued, the efforts to move forward with Puma stalled for a while.\n\n## Experimental development use\n\nIn May, 2018 Puma was configured for [experimental development use](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/532) in GitLab Rails and [Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/2801). Later that year, we added [Puma metrics to Prometheus](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/52769) to track our internal experimental usage of Puma. By early spring of 2019 GitLab moved forward with the creation of the [Memory Team](/blog/why-we-created-the-gitlab-memory-team/) whose early set of identified tasks was to deploy Puma to GitLab.com.\n\n\n## Implementation steps\n\nThe efforts to implement Puma on GitLab.com and for our self-managed customers started in earnest in early 2019 with the [Enable Puma Web Server for GitLab](https://gitlab.com/groups/gitlab-org/-/epics/954) epic and the creation of the Memory Team. One of the early steps we took was to [enable Puma by default in the GDK ](https://gitlab.com/gitlab-org/gitlab-development-kit/-/issues/490) to get metrics and feedback from the community and our customers while we worked to deploy on GitLab.com.\n\nThe ability to measure the improvements achieved by the Puma deployment was critical to determining whether we had achieved our goals of overall memory reduction. To capture these metrics we set up [two identical environments](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/62877) to test changes on a daily basis. This would allow us to quickly make changes to the worker/thread ratio within Puma and quickly review the impact of the changes.\n\n### A roll out plan\n\nWe have multiple pre-production environments and we follow a progression of deploying Puma to each of these stages (dev->ops->staging->canary->production). 
Within each of these stages we would deploy the changes to enable Puma and test the changes. Once we confirmed a successful deployment we would measure and make configuration changes for optimal performance and memory reduction.\n\n### Issues and Tuning\n\nEarly on we determined that our usage of [ChronicDuration](https://gitlab.com/gitlab-org/gitlab/-/issues/31285) was not thread-safe. We ended up [forking the code](https://gitlab.com/gitlab-org/gitlab/-/issues/31285#note_215961555) and distributing our own [gitlab-chronic-duration](https://gitlab.com/gitlab-org/gitlab-chronic-duration) to solve our thread-safety issues.\n\nWe encountered only minor issues in the previous environments but once we deployed to Canary our infrastructure team reported some [unacceptable latency issues](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/7455#note_239070865). We spent a significant amount of time tuning [Puma](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/8334) for the optimal configuration of workers to threads. We also discovered some changes required to our [health-check endpoint](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/4835) to ensure minimal to no downtime during upgrades.\n\n### Puma Upstream Patch\n\nAs we zeroed in on tuning GitLab.com with Puma we discovered that the capacity was not being evenly distributed. Puma capacity is calculated by `workers * threads`, so if you have 2 workers and 2 threads you have a capacity of 4. Since Puma uses round-robin to schedule requests, and no other criteria, we saw evidence of some workers being saturated while others sat idle. The simple [fix](https://github.com/puma/puma/pull/2079/files) proposed by [Kamil Trzcinski](https://gitlab.com/ayufan) was to make Puma inject a minimal amount of latency between requests if the worker is already processing requests. 
This would allow other workers (that are idle) to accept socket much faster than our worker that is already processing other traffic.\n\nYou can read more details about the discovery and research [here](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/8334#note_247859173).\n\n## Our results\n\nOnce we deployed Puma to our entire web fleet we observed a drop in memory usage from 1.28T to approximately 800GB (approximately a 37% drop) while our request queuing, request duration and CPU usage all remained roughly the same.\n\nMore details and graphs can be found [here](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/1684#note_291225063). \n\nPuma is now on by default for all GitLab customers in the [GitLab 13.0 release](/releases/2020/05/22/gitlab-13-0-released/).\n\n## What's next\n\nWe want to review our infrastructure needs! The efficiency gains brought about by deploying Puma will allow us to re-examine the memory needs of Rails nodes in production. \n\nAlso, Puma has enabled us to continue to pursue our efforts to enable [real time editing](https://gitlab.com/groups/gitlab-org/-/epics/52). 
\n\n**More about GitLab's infrastructure:**\n\n[How we scaled Sidekiq](/blog/scaling-our-use-of-sidekiq/)\n\n[Make your pipelines more flexible](/blog/directed-acyclic-graph/)\n\n[The inside scoop on the building of our Status Page](/blog/how-we-built-status-page-mvc/)\n\nCover image by [John Moeses Bauan](https://unsplash.com/@johnmoeses) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,677,1295],{"slug":4880,"featured":6,"template":680},"migrating-to-puma-on-gitlab","content:en-us:blog:migrating-to-puma-on-gitlab.yml","Migrating To Puma On Gitlab","en-us/blog/migrating-to-puma-on-gitlab.yml","en-us/blog/migrating-to-puma-on-gitlab",{"_path":4886,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4887,"content":4893,"config":4898,"_id":4900,"_type":14,"title":4901,"_source":16,"_file":4902,"_stem":4903,"_extension":19},"/en-us/blog/monetizing-and-being-open-source",{"title":4888,"description":4889,"ogTitle":4888,"ogDescription":4889,"noIndex":6,"ogImage":4890,"ogUrl":4891,"ogSiteName":667,"ogType":668,"canonicalUrls":4891,"schema":4892},"How GitLab makes money","Monetizing open source ≠ an 'open source' company. Our CEO Sid Sijbrandij explains how we got over the barriers to monetizing an open source company.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678896/Blog/Hero%20Images/monetizing-os.jpg","https://about.gitlab.com/blog/monetizing-and-being-open-source","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab makes money\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-11-09\",\n      }",{"title":4888,"description":4889,"authors":4894,"heroImage":4890,"date":4895,"body":4896,"category":299,"tags":4897},[2313],"2018-11-09","\nWhile the number of businesses based on open source technology is growing exponentially, that doesn't mean a profitable open source company is truly open. 
Our CEO [Sid Sijbrandij](/company/team/#sytses) believes the key lies in not just being [a steward of an open source project](/company/stewardship/), but more so in being [transparent](https://handbook.gitlab.com/handbook/values/#transparency) and [collaborative](https://handbook.gitlab.com/handbook/values/#collaboration) – going so far as to allow the public to play a role in the way the company is run. And that’s what he’s aiming to do with GitLab:\n\n>\"We want to take it one step further, where you're not only taking open source and contributing back to that project, but you're also allowing people to contribute back to what you make – in our case GitLab – to improve the company. Sometimes people from the wider community contribute to [our handbook](/handbook/) to make improvements. I think that's really exciting. And in order for us to not create a rift between GitLab the open source project and GitLab the company, we try to work out in the public. Our strategy, our OKRs (Objectives and Key Results), all of the issues we work on are out there in the public. People can see what we're doing, and comment on it. I think that's a superpower. I'm very proud that people keep contributing to GitLab and we're working really hard to keep it that way.\"\n\n## Finding the right business model: Trial and error\n\nWhile the number of open source companies is trending up, making free software profitable is not an easy feat. Sid and co-founder and Engineering Fellow [Dmitriy Zaporozhets](/company/team/#dzaporozhets) experimented with a number of business models for GitLab before finding a formula that worked.\n\n### Donations\n\n\"Dmitriy used to talk about ice cream money, which were donations,\" Sid recalls. \"They were seven bucks a month, so he and his wife could buy ice cream once a month from the donations. We tried that and got up to $1,000 in the most profitable month after having a big drive. 
But that wasn't sustainable to run a company with multiple employees.\"\n\n### Feature request fees and paid support\n\nThey also tried charging a fee to build requested features from users. But that model fell apart when people found out there were others making the same request and, instead of paying for a fraction of the feature, dropped their order with the expectation that another user or company would pay for it. Sid and Dmitriy then moved to a support model, but found it to be a catch-22. As they improved the product, fewer people needed support. But to do a less-than-stellar job in the installation of GitLab \"kind of destroys\" the brand, Sid says.\n\n### Open core\n\n\"In the end, we settled on [open core](/blog/gitlab-is-open-core-github-is-closed-source/), where [some features are paid](/pricing/). The hard thing was deciding which features are paid. I think after many years we now have a good way to determine that. The feature aimed at an individual contributor, it's open source. If it's aimed at a manager, it's in Starter. If it's aimed at a director, it's in Premium. And if it's aimed at a C-level exec, it's in Ultimate. That brings a lot of clarity and it seems to work really well, but it took us a while to figure that one out.\"\n\n## What’s next\n\nTo continue engaging with and growing GitLab’s presence in the open source community, Sid plans to open the company’s summit to the wider community. The [next summit](/events/gitlab-contribute/), which occurs every nine months, is set to include about 100 community members and customers, and that figure is expected to grow in the future.\n\nWe’re also giving folks a view into our operations via more videos and plan to open our books for all to see as soon as we can.\n\n\"We’re doing more live streaming and putting recordings up on [YouTube](https://www.youtube.com/c/gitlab),\" Sid says. 
\"I think we're at over 400 videos now, but anything we have, it doesn't matter what the quality is, we want to post it for people to consume. I'm really excited at what's happening there.\n\n\"Another thing I look forward to doing is being more open about our financials. We plan and hope to be a public company one day, and then we will be open about our financials every quarter. As soon as we have audited financials, we'll start doing those calls, even when we're still a private company.\"\n\n[Cover image](https://unsplash.com/photos/Xaanw0s0pMk) by [Jason Leung](https://unsplash.com/@ninjason) on Unsplash\n{: .note}\n",[9,745],{"slug":4899,"featured":6,"template":680},"monetizing-and-being-open-source","content:en-us:blog:monetizing-and-being-open-source.yml","Monetizing And Being Open Source","en-us/blog/monetizing-and-being-open-source.yml","en-us/blog/monetizing-and-being-open-source",{"_path":4905,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4906,"content":4912,"config":4919,"_id":4921,"_type":14,"title":4922,"_source":16,"_file":4923,"_stem":4924,"_extension":19},"/en-us/blog/more-performant-and-robust-task-lists-in-gitlab",{"title":4907,"description":4908,"ogTitle":4907,"ogDescription":4908,"noIndex":6,"ogImage":4909,"ogUrl":4910,"ogSiteName":667,"ogType":668,"canonicalUrls":4910,"schema":4911},"How we delivered more performant and robust task lists in GitLab","How simple checkboxes became a challenging engineering problem – and how we fixed it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668319/Blog/Hero%20Images/more-robust-task-lists.jpg","https://about.gitlab.com/blog/more-performant-and-robust-task-lists-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we delivered more performant and robust task lists in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatih Acet\"},{\"@type\":\"Person\",\"name\":\"Brett 
Walker\"}],\n        \"datePublished\": \"2019-04-05\",\n      }",{"title":4907,"description":4908,"authors":4913,"heroImage":4909,"date":4916,"body":4917,"category":743,"tags":4918},[4914,4915],"Fatih Acet","Brett Walker","2019-04-05","\n[GitLab task lists](https://docs.gitlab.com/ee/user/markdown#task-lists) are\na list of checkboxes that you can include anywhere in GitLab where you can have\n[GitLab Flavored Markdown (GFM)](https://docs.gitlab.com/ee/user/markdown#gitlab-flavored-markdown-gfm).\nThis includes issue descriptions and comments, as well as merge requests and epics.\nThey can be used for a list of items to consider when building a feature, tracking\ntasks for new employees to complete when onboarding, or even managing that list\nof materials to purchase for your next home renovation. You can use them as todo\nlists, and so checking off an item should be quick and satisfying.\n\n## More checkboxes, more problems\n\nIn the past, task lists with several items, even dozens, worked fairly well. Check\nan empty checkbox, and a database record gets updated. The checkbox is then displayed\nas checked. Done.\n\nHowever, as the number of items increases, and the consequent\nmarkdown becomes more complex and longer, problems begin to appear. For example,\nvisually the checkbox appears checked, but because updating the backend takes a\nlonger time, if you checked another checkbox, the screen would refresh several seconds\nlater and the checkbox might then be unchecked. It soon became next to impossible\nto go down a list and check off items without waiting 10 seconds between each one.\nYet another problem was that if other users were also checking items on the list,\nyour change could be erased by them checking their item – they were overwriting\nyour data.\n\nIn [GitLab 11.8](/releases/2019/02/22/gitlab-11-8-released/#performance-improvements) (released on Feb. 
22, 2019),\nwe significantly increased the performance of task lists, as well as making them\nmuch more robust. Here's how we did it:\n\n### Essentially we wanted:\n\n- Checking a checkbox to be as fast as possible.\n- Many users to concurrently interact with checkboxes in the same task list,\nwithout overwriting each other.\n\nBoth the performance and data integrity issues stemmed from the fact that we were\nupdating the complete markdown. This meant that we changed the markdown source in\nthe browser with the updated checkbox, sent it to the backend, where it was saved\nto the database, and then re-rendered so that we could cache the new HTML and send\nit back to the user.\n\n## A scalable solution\n\nBut what if we could update a single checkbox, and send only that to the backend? That\nmight allow multiple users to check off as many tasks as they wanted, without clobbering\neach other. And what if we didn't have to do any markdown rendering at all? We wouldn't\nhave to do any markdown processing, or process embedded issue links, or query if\nlabels have changed, or any of the other advanced things that go on when updating\nan issue. 
Performance would definitely increase in this case.\n\n### Frontend work\n\nOn the frontend, with only a small modification to the\n[deckar01/task_list](https://github.com/deckar01/task_list/commit/d1c96451df5fb8fdadc2cd080f65ffe2d2076a3a)\ngem we use, we were able to pass the exact text and line number in the markdown source\nfor the clicked task.\n[Wrap this piece of information](https://gitlab.com/gitlab-org/gitlab-ce/blob/b4165554113a7f9ce9fecd7d169f9a64686b5c44/app/assets/javascripts/task_list.js#L63-68)\nin a new `update_task` parameter for our update endpoint, and send it to the backend.\n\n### Backend work\n\nOn the backend,\n[we needed to verify](https://gitlab.com/gitlab-org/gitlab-ce/blob/b4165554113a7f9ce9fecd7d169f9a64686b5c44/app/services/task_list_toggle_service.rb#L30-51)\nthat the task we were interested in still existed in exactly the same format – the text had to match\nthe exact line number in the source. This meant that even if someone changed text above or below\nthe task item, as long as our line matched exactly, we could update that line in the latest source\nand save it without losing changes.\n\nIn order to update our cached HTML so that we wouldn't have to re-render it, we turned on\nthe `SOURCEPOS` flag of the CommonMark renderer, which adds a `data-sourcepos` attribute to the HTML.\nFor example, a task item's HTML might look like this:\n\n```\n\u003Cli data-sourcepos=\"1:1-1:12\" class=\"task-list-item\">\n  \u003Cinput type=\"checkbox\" class=\"task-list-item-checkbox\" disabled> Task 1\n\u003C/li>\n```\n\nWith a little [Nokogiri](https://nokogiri.org) magic we were able to find the correct line\nand toggle the checked attribute.\n\nSince we updated the cache directly, we completely bypassed any markdown rendering,\nprocessing of special attributes, etc. Performance dramatically increased. 
However,\nsince we are not able to get it down to zero, we disabled the checkboxes while the\nrequest was in flight to ensure we weren't getting clicks on other tasks.\n\nThe result: a much more satisfying task list.\n\n[Brett Walker](https://gitlab.com/digitalmoksha) worked on the backend changes and\n[Fatih Acet](https://gitlab.com/fatihacet) worked on the frontend changes in this\nimprovement. See more details in [the GitLab issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/19745).\n\nPhoto by [Glenn Carstens-Peters](https://unsplash.com/photos/RLw-UC03Gwc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[3138,9],{"slug":4920,"featured":6,"template":680},"more-performant-and-robust-task-lists-in-gitlab","content:en-us:blog:more-performant-and-robust-task-lists-in-gitlab.yml","More Performant And Robust Task Lists In Gitlab","en-us/blog/more-performant-and-robust-task-lists-in-gitlab.yml","en-us/blog/more-performant-and-robust-task-lists-in-gitlab",{"_path":4926,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4927,"content":4933,"config":4939,"_id":4941,"_type":14,"title":4942,"_source":16,"_file":4943,"_stem":4944,"_extension":19},"/en-us/blog/moving-to-headless-chrome",{"title":4928,"description":4929,"ogTitle":4928,"ogDescription":4929,"noIndex":6,"ogImage":4930,"ogUrl":4931,"ogSiteName":667,"ogType":668,"canonicalUrls":4931,"schema":4932},"How GitLab switched to Headless Chrome for testing","A detailed explanation with examples of how GitLab made the switch to headless Chrome.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680270/Blog/Hero%20Images/headless-chrome-cover.jpg","https://about.gitlab.com/blog/moving-to-headless-chrome","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab switched to Headless Chrome for testing\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Mike Greiling\"}],\n        \"datePublished\": \"2017-12-19\",\n      }",{"title":4928,"description":4929,"authors":4934,"heroImage":4930,"date":4936,"body":4937,"category":743,"tags":4938},[4935],"Mike Greiling","2017-12-19","\n\nGitLab recently switched from PhantomJS to headless Chrome for both our\nfrontend tests and our RSpec feature tests. In this post we will detail the\nreasons we made this transition, the challenges we faced, and the solutions we\ndeveloped. We hope this will benefit others making the switch.\n\n\u003C!-- more -->\n\nWe now have a truly accurate way to test GitLab within a real, modern browser.\nThe switch has improved our ability to write tests and debug them while running\nthem directly in Chrome. Plus the change forced us to confront and clean up a\nnumber of hacks we had been using in our tests.\n\n## Switching to headless Chrome from PhantomJS: background\n\n[PhantomJS](http://phantomjs.org) has been a part of GitLab's test framework\n[for almost five years](https://gitlab.com/gitlab-org/gitlab-ce/commit/ba25b2dc84cc25e66d6fa1450fee39c9bac002c5).\nIt has been an immensely useful tool for running browser integration tests in a\nheadless environment at a time when few options were available. However, it\nhad some shortcomings:\n\nThe most recent version of PhantomJS (v2.1.1) is compiled with a three-year-old\nversion of [QtWebKit](https://trac.webkit.org/wiki/QtWebKit) (a fork of WebKit\nv538.1 according to the user-agent string). This puts it on par with something\nlike Safari 7 on macOS 10.9. It resembles a real modern browser, but it's not\nquite there. It has a different JavaScript engine, an older rendering engine,\nand a host of missing features and quirks.\n\nAt this time, GitLab supports [the current and previous major\nrelease](https://docs.gitlab.com/ee/install/requirements.html#supported-web-browsers) of\nFirefox, Chrome, Safari, and Microsoft Edge/IE. 
This puts PhantomJS and its\ncapabilities somewhere near or below our lowest common denominator. Many modern\nbrowser features either [do not work](http://phantomjs.org/supported-web-standards.html),\nor [require vendor prefixes](http://phantomjs.org/tips-and-tricks.html) and\npolyfills that none of our supported browsers require. We could selectively\nadd these polyfills, prefixes, and other workarounds just within our test\nenvironment, but doing so would increase technical debt, cause confusion, and\nmake the tests less representative of a true production environment. In most\ncases we had opted to simply omit them or hack around them (more on this\n[later](#trigger-method)).\n\nHere's a screenshot of the way PhantomJS renders a page from GitLab, followed\nby the same page rendered in Google Chrome:\n\n![Page Rendered by PhantomJS](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/render-phantomjs.png){: .shadow.center}\n\n![Page Rendered by Google Chrome](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/render-chrome.png){: .shadow.center}\n\nYou can see in PhantomJS the filter tabs are rendered horizontally, the icons\nin the sidebar render on their own lines, the global search field is\noverflowing off the navbar, etc.\n\nWhile it looks ugly, in most cases we could still use this to run functional\ntests, so long as elements of the page remain visible and clickable, but this\ndisparity with the way GitLab rendered in a real browser did introduce several\nedge cases.\n\n## What is headless Chrome\n\nIn April of this year, [news spread](https://news.ycombinator.com/item?id=14101233)\nthat Chrome 59 would support a [native, cross-platform headless\nmode](https://www.chromestatus.com/features/5678767817097216). It was\npreviously possible to simulate a headless Chrome browser in CI/CD [using\nvirtual frame buffer](https://gist.github.com/addyosmani/5336747), but this\nrequired a lot of memory and extra complexities. 
A native headless mode is a\ngame changer. It is now possible to run integration tests in a headless\nenvironment on a real, modern web browser that our users actually use!\n\nSoon after this was revealed, Vitaly Slobodin, PhantomJS's chief developer,\nannounced that the project [would no longer be\nmaintained](https://github.com/ariya/phantomjs/issues/15105#issuecomment-322850178):\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-cards=\"hidden\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">This is the end - \u003Ca href=\"https://t.co/GVmimAyRB5\">https://t.co/GVmimAyRB5\u003C/a>\u003Ca href=\"https://twitter.com/hashtag/phantomjs?src=hash&amp;ref_src=twsrc%5Etfw\">#phantomjs\u003C/a> 2.5 will not be released. Sorry, guys!\u003C/p>&mdash; Vitaly Slobodin (@Vitalliumm) \u003Ca href=\"https://twitter.com/Vitalliumm/status/852450027318464513?ref_src=twsrc%5Etfw\">April 13, 2017\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nIt became clear that we would need to make the transition away from PhantomJS at\nsome point, so we [opened up an issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/30876),\ndownloaded the Chrome 59 beta, and started looking at options.\n\n### Frontend tests (Karma)\n\nOur frontend test suite utilizes the [Karma](http://karma-runner.github.io/)\ntest runner, and updating this to work with Google Chrome was surprisingly\nsimple ([here's the merge request](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12036)).\nThe [karma-chrome-launcher](https://github.com/karma-runner/karma-chrome-launcher)\nplugin was very quickly updated to support headless mode starting from\n[version 2.1.0](https://github.com/karma-runner/karma-chrome-launcher/releases/tag/v2.1.0),\nand it was essentially a drop-in replacement for the PhantomJS launcher. 
Once\nwe [re-built our CI/CD build images](https://gitlab.com/gitlab-org/gitlab-build-images/merge_requests/41)\nto include Google Chrome 59 (and fiddled around with some pesky timeout\nsettings), it worked!  We were also able to remove some rather ugly\nPhantomJS-specific hacks that Jasmine required to spy on some built-in browser\nfunctions.\n\n### Backend feature tests (RSpec + Capybara)\n\nOur feature tests use RSpec and [Capybara](https://github.com/teamcapybara/capybara)\nto perform full end-to-end integration testing of database, backend, and\nfrontend interactions. Before switching to headless Chrome, we had used\n[Poltergeist](https://github.com/teampoltergeist/poltergeist) which is a\nPhantomJS driver for Capybara. It would spin up a PhantomJS browser instance\nand direct it to browse, fill out forms, and click around on pages to verify\nthat everything behaved as it should.\n\nSwitching from PhantomJS to Google Chrome required a change in drivers from\nPoltergeist to Selenium and [ChromeDriver](https://sites.google.com/a/chromium.org/chromedriver/).\nSetting this up was pretty straightforward. You can install ChromeDriver on\nmacOS with `brew install chromedriver` and the process is similar on any given\npackage manager in Linux. After this we added the `selenium-webdriver` gem to\nour test dependencies and configured Capybara like so:\n\n```ruby\nrequire 'selenium-webdriver'\n\nCapybara.register_driver :chrome do |app|\n  options = Selenium::WebDriver::Chrome::Options.new(\n    args: %w[headless disable-gpu no-sandbox]\n  )\n  Capybara::Selenium::Driver.new(app, browser: :chrome, options: options)\nend\n\nCapybara.javascript_driver = :chrome\n```\n\nGoogle says the [`disable-gpu` option is necessary for the time\nbeing](https://developers.google.com/web/updates/2017/04/headless-chrome#cli)\nuntil some bugs are resolved. 
The `no-sandbox` option also appears to be\nnecessary to get Chrome running inside a Docker container for [GitLab's CI/CD\nenvironment](/topics/ci-cd/). Google provides a [useful guide for working with headless Chrome\nand Selenium](https://developers.google.com/web/updates/2017/04/headless-chrome).\n\nIn our final implementation we changed this to conditionally add the `headless`\noption unless you have `CHROME_HEADLESS=false` in your environment. This makes\nit easy to disable headless mode while debugging or writing tests. It's also\npretty fun to watch tests execute on the browser window in real time:\n\n```shell\nexport CHROME_HEADLESS=false\nbundle exec rspec spec/features/merge_requests/filter_merge_requests_spec.rb\n```\n\n![Tests Executing in Chrome](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/headlessless-chrome-tests.gif){: .shadow.center}\n\n### What are the differences between Poltergeist and Selenium?\n\nThe process of switching drivers here was not nearly as straightforward as\nit was with the frontend test suite. Dozens of tests started failing as soon\nas we changed our Capybara configuration, and this was due to some major\ndifferences in the way Selenium/ChromeDriver implemented Capybara's driver API\ncompared to Poltergeist/PhantomJS. Here are some of the challenges we ran into:\n\n1.  **JavaScript modals are no longer accepted automatically**\n\n    We often use JavaScript `confirm(\"Are you sure you want to do X?\");` click\n    events when performing a destructive action such as deleting a branch or\n    removing a user from a group. Under Poltergeist a `.click` action would\n    automatically accept modals like `alert()` and `confirm()`, but under\n    Selenium, you now need to wrap these with one of `accept_alert`,\n    `accept_confirm`, or `dismiss_confirm`. 
e.g.:\n\n    ```ruby\n    # Before\n    page.within('.some-selector') do\n      click_link 'Delete'\n    end\n\n    # After\n    page.within('.some-selector') do\n      accept_confirm { click_link 'Delete' }\n    end\n    ```\n\n1.  **Selenium `Element.visible?` returns false for empty elements**\n\n    If you have an empty `div` or `span` that you want to access in your test,\n    Selenium does not consider these \"visible.\" This is not much of an issue\n    unless you set `Capybara.ignore_hidden_elements = true` as we do in our\n    feature tests. Where `find('.empty-div')` would have worked fine in\n    Poltergeist, we now need to use `visible: :any` to\n    select such elements.\n\n    ```ruby\n    # Before\n    find('.empty-div')\n\n    # After\n    find('.empty-div', visible: :any)\n    # or\n    find('.empty-div', visible: false)\n    ```\n\n    More on [Capybara and hidden elements](https://makandracards.com/makandra/7617-change-how-capybara-sees-or-ignores-hidden-elements).\n\n1.  {:#trigger-method} **Poltergeist's `Element.trigger('click')` method does not exist in Selenium**\n\n    In Capybara, when you use `find('.some-selector').click`, the element you\n    are clicking must be both visible and unobscured by any overlapping\n    element. Situations where links could not be clicked would sometimes occur\n    with Poltergeist/PhantomJS due to its poor CSS support sans-prefixes.\n    Here's one example:\n\n    ![Overlapping elements](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/overlapping-element.png){: .shadow.center}\n\n    The broken layout of the search form here was actually placing an invisible\n    element over the top of the \"Update all\" button, making it unclickable.\n    Poltergeist offers a `.trigger('click')` method to work around this.\n    Rather than actually clicking the element, this method would trigger a DOM\n    event to simulate a click. 
Utilizing this method was a bad practice, but\n    we ran into similar issues so often that many developers formed a habit\n    of using it everywhere. This began to lead to some lazy and sloppy test\n    writing. For instance, someone might use `.trigger` as a shortcut to click\n    on an link that was obscured behind an open dropdown menu, when a properly\n    written test should `.click` somewhere to close the dropdown, and _then_\n    `.click` on the item behind it.\n\n    Selenium does not support the `.trigger` method. Now that we were using a\n    more accurate rendering engine that won't break our layouts, many of these\n    instances could be resolved by simply replacing `.trigger('click')` with\n    `.click`, but due to some of the bad practice uses mentioned above, this\n    didn't always work.\n\n    There are of course some ways to hack a `.trigger` replacement. You could\n    simulate a click by focusing on an element and hitting the \"return\" key,\n    or use JavaScript to trigger a click event, but in most cases we decided to\n    take the time and actually correct these poorly implemented tests so that a\n    normal `.click` could again be used. After all, if our tests are meant to\n    simulate a real user interacting with the page, we should limit ourselves\n    to the actions a real user would be expected to use.\n\n    ```ruby\n    # Before\n    find('.obscured-link').trigger('click')\n\n    # After\n\n    # bad\n    find('.obscured-link').send_keys(:return)\n\n    # bad\n    execute_script(\"document.querySelector('.obscured-link').click();\")\n\n    # good\n    # do something to make link accessible, then\n    find('.link').click\n    ```\n\n1.  
**`Element.send_keys` only works on focus-able elements**\n\n    We had a few places in our code where we would test out our keyboard\n    shortcuts using something like `find('.boards-list').native.send_keys('i')`.\n    It turns out Chrome will not allow you to `send_keys` to any element that\n    cannot be \"focused\", e.g. links, form elements, the document body, or\n    presumably anything with a tab index.\n\n    In all of the cases where we were doing this, triggering `send_keys` on the\n    body element would work since that's ultimately where our event handler was\n    listening anyway:\n\n    ```ruby\n    # Before\n    find('.some-div').native.send_keys('i')\n\n    # After\n    find('body').native.send_keys('i')\n    ```\n\n1.  **`Element.send_keys` does not support non-BMP characters (like emoji)**\n\n    In a few tests, we needed to fill out forms with emoji characters. With\n    Poltergeist we would do this like so:\n\n    ```ruby\n    # Before\n    find('#note-body').native.send_keys('@💃username💃')\n    ```\n\n    In Selenium we would get the following error message:\n\n    ```\n    Selenium::WebDriver::Error::UnknownError:\n        unknown error: ChromeDriver only supports characters in the BMP\n    ```\n\n    To work around this, we added [a JavaScript method to our test bundle that\n    would simulate input and fire off the same DOM events](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/app/assets/javascripts/test_utils/simulate_input.js)\n    that an actual keyboard input would generate on every keystroke, then\n    wrapped this with a [ruby helper](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/spec/support/input_helper.rb)\n    method that could be called like so:\n\n    ```ruby\n    # After\n    include InputHelper\n\n    simulate_input('#note-body', \"@💃username💃\")\n    ```\n\n1.  
**Setting cookies is much more complicated**\n\n    It's quite common to want to set some cookies before `visit`ing a page that\n    you intend to test, whether it's to mock a user session, or toggle a\n    setting. With Poltergeist, this process is really simple. You can use\n    `page.driver.set_cookie`, provide a simple key/value pair, and it will just\n    work as expected, setting a cookie with the correct domain and scope.\n\n    Selenium is quite a bit more strict. The method is now\n    `page.driver.browser.manage.add_cookie`, and it comes with two caveats:\n\n    - You cannot set cookies until you `visit` a page in the domain you intend\n      to scope your cookies to.\n    - Annoyingly, you cannot alter the `path` parameter (or at least we could\n      never get this to work), so it is best to set cookies at the root path.\n\n    Before you `visit` your page, Chrome's url is technically sitting at\n    something like `about:blank;`. When you attempt to set a cookie there, it\n    will refuse because there is no hostname, and you cannot coerce one by\n    providing a domain as an argument. The [Selenium\n    documentation](http://docs.seleniumhq.org/docs/03_webdriver.jsp#cookies)\n    suggests that you do the following:\n\n    > If you are trying to preset cookies before you start interacting with a\n    > site and your homepage is large / takes a while to load, an alternative is\n    > to find a smaller page on the site (typically the 404 page is small, e.g.\n    > `http://example.com/some404page`).\n\n    ```ruby\n    # Before\n    before do\n      page.driver.set_cookie('name', 'value')\n    end\n\n    # After\n    before do\n      visit '/some-root-path'\n      page.driver.browser.manage.add_cookie(name: 'name', value: 'value')\n    end\n    ```\n\n1.  
**Page request/response inspection methods are missing**\n\n    Poltergeist very conveniently implemented methods like `page.status_code`\n    and `page.response_headers` which are also present in Capybara's default\n    `RackTest` driver, making it easy to inspect the raw response from the\n    server, in addition to the way that response is rendered by the browser. It\n    also allowed you to inject headers into the requests made to the server,\n    e.g.:\n\n    ```ruby\n    # Before\n    before do\n      page.driver.add_header('Accept', '*/*')\n    end\n\n    it 'returns a 404 page'\n      visit some_path\n\n      expect(page.status_code).to eq(404)\n      expect(page).to have_css('.some-selector')\n    end\n    ```\n\n    Selenium does not implement these methods, and [the authors do not intend\n    to add support for them](https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/141#issuecomment-191404986),\n    so we needed to develop a workaround. Several people have suggested running\n    a proxy alongside ChromeDriver that would intercept all traffic to and from\n    the server, but this seemed to us like overkill. Instead, we opted to\n    create a [lightweight Rack middleware](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/lib/gitlab/testing/request_inspector_middleware.rb)\n    and a corresponding [helper class](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/spec/support/inspect_requests.rb)\n    that would intercept the traffic for inspection. This is similar to our\n    [RequestBlockerMiddleware](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/gitlab/testing/request_blocker_middleware.rb)\n    that we were already using to intelligently `wait_for_requests` to complete\n    within our tests. 
It works like this:\n\n    ```ruby\n    # After\n    it 'returns a 404 page'\n      requests = inspect_requests do\n        visit some_path\n      end\n\n      expect(requests.first.status_code).to eq(404)\n      expect(page).to have_css('.some-selector')\n    end\n    ```\n\n    Within the `inspect_requests` block, the Rack middleware will log all\n    requests and responses, and return them as an array for inspection. This\n    will include the page being `visit`ed as well as the subsequent XHR and\n    asset requests, but the initial path request will be the first in the array.\n\n    You can also inject headers using the same helper like so:\n\n    ```ruby\n    # After\n    inspect_requests(inject_headers: { 'Accept' => '*/*' }) do\n      visit some_path\n    end\n    ```\n\n    This middleware should be injected early in the stack to ensure any other\n    middleware that might intercept or modify the request/response will be\n    seen by our tests. We include this line in our test environment config:\n\n    ```ruby\n    config.middleware.insert_before('ActionDispatch::Static', 'Gitlab::Testing::RequestInspectorMiddleware')\n    ```\n\n1.  **Browser console output is no longer output to the terminal**\n\n    Poltergeist would automatically output any `console` messages directly into\n    the terminal in real time as tests were run. If you had a bug in the frontend\n    code that caused a test to fail, this feature would make debugging much\n    easier as you could inspect the terminal output of the test for an error\n    message or a stack trace, or inject your own `console.log()` into the\n    JavaScript to see what is going on. 
With Selenium this is sadly no longer the\n    case.\n\n    You can, however, collect browser logs by configuring Capybara like so:\n\n    ```ruby\n    capabilities = Selenium::WebDriver::Remote::Capabilities.chrome(\n      loggingPrefs: {\n        browser: \"ALL\",\n        client: \"ALL\",\n        driver: \"ALL\",\n        server: \"ALL\"\n      }\n    )\n\n    # ...\n\n    Capybara::Selenium::Driver.new(\n      app,\n      browser: :chrome,\n      desired_capabilities: capabilities,\n      options: options\n    )\n    ```\n\n    This will allow you to access logs with the following, i.e. in the event of\n    a test failure:\n\n    ```ruby\n    page.driver.manage.get_log(:browser)\n    ```\n\n    This is far more cumbersome than it was in Poltergeist, but it's the best\n    method we've found so far. Thanks to [Larry Reid's blog post](http://technopragmatica.blogspot.com/2017/10/switching-to-headless-chrome-for-rails_31.html)\n    for the tip!\n\n## Results\n\nRegarding performance, we attempted to quantify the change with a\nnon-scientific analysis of 10 full-suite RSpec test runs _before_ this change,\nand 10 more runs from _after_ this change, factoring out any tests that were\nadded or removed between these pipelines. The end result was:\n\n**Before:** 5h 18m 52s\n**After:** 5h 12m 34s\n\nA savings of about six minutes, or roughly 2 percent of the total compute time, is\nstatistically insignificant, so I'm not going to claim we improved our test\nspeed with this change.\n\nWhat we did improve was test accuracy, and we vastly improved the tools at our\ndisposal to write and debug tests. Now, all of the Capybara screenshots\ngenerated when a CI/CD job fails look exactly as they do on your own browser\nrather than resembling the broken PhantomJS screenshot above. 
Inspecting a\nfailing test locally can now be done interactively by turning off headless\nmode, dropping a `byebug` line into the spec file, and watching the browser\nwindow as you type commands into the prompt. This technique proved extremely\nuseful while working on this project.\n\nYou can find all of the changes we made in [the original merge request page\non GitLab.com](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12244).\n\n## What are some additional uses for headless Chrome?\n\nWe have also been utilizing headless Chrome to analyze frontend performance, and have found it to be useful in detecting issues.\n\nWe'd like to make it easier for other companies to embrace as well, so as part of the upcoming 10.3 release of GitLab we are releasing [Browser Performance Testing](https://docs.gitlab.com/ee/user/project/merge_requests/browser_performance_testing.html). Leveraging [GitLab CI/CD](/features/continuous-integration/), headless Chrome is launched against a set of pages and an overall performance score is calculated. Then for each merge request the scores are compared between the source and target branches, making it easier to detect performance regressions prior to merge.\n\n## Acknowledgements\n\nI sincerely hope this information will prove useful to anybody else looking to\nmake the switch from PhantomJS to headless Chrome for their Rails application.\n\nThanks to the Google team for their very helpful documentation, thanks to the\nmany bloggers out there who shared their own experiences with hacking headless\nChrome in the early days of its availability, and special thanks to Vitaly\nSlobodin and the rest of the contributors to PhantomJS who provided us with an\nextremely useful tool that served us for many years. 
🙇‍\n\n\u003Cstyle>\n\n.center {\n  text-align: center;\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n\ncode, kbd {\n  font-size: 80%;\n}\n\n\u003C/style>\n",[9,3138,722],{"slug":4940,"featured":6,"template":680},"moving-to-headless-chrome","content:en-us:blog:moving-to-headless-chrome.yml","Moving To Headless Chrome","en-us/blog/moving-to-headless-chrome.yml","en-us/blog/moving-to-headless-chrome",{"_path":4946,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4947,"content":4953,"config":4957,"_id":4959,"_type":14,"title":4960,"_source":16,"_file":4961,"_stem":4962,"_extension":19},"/en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol",{"title":4948,"description":4949,"ogTitle":4948,"ogDescription":4949,"noIndex":6,"ogImage":4950,"ogUrl":4951,"ogSiteName":667,"ogType":668,"canonicalUrls":4951,"schema":4952},"Moving workflows to GitLab: The case of the HIPAA Audit Protocol","With the GitLab API, you can easily move workflows into GitLab. Here’s how we did it for the HIPAA Audit Protocol.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679716/Blog/Hero%20Images/bright-cardiac-cardiology.jpg","https://about.gitlab.com/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Moving workflows to GitLab: The case of the HIPAA Audit Protocol\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Luka Trbojevic\"}],\n        \"datePublished\": \"2019-07-25\",\n      }",{"title":4948,"description":4949,"authors":4954,"heroImage":4950,"date":1129,"body":4955,"category":743,"tags":4956},[910],"\n\nUsing GitLab for just about everything we do, I’ve seen firsthand how powerful and effective\nit can be as a project management tool.\n\n**However, in speaking with folks about adopting GitLab for their own non-development\nworkflows, the most common roadblock I hear is 
the lack of specific examples.** If you're not\na developer or otherwise don't work with GitLab often, it can be hard to see how all the\nfeatures and capabilities fit together to go from an idea to a functional workflow. Because\nof this, I thought it was important to create a specific, real-world example for something most\nfolks can relate to: an audit.\n\nThe [HIPAA Audit Protocol](https://www.hhs.gov/hipaa/for-professionals/compliance-enforcement/audit/protocol/index.html)\nis published by the U.S. Department of Health & Human Services and is used by the\nOffice for Civil Rights as part of its HIPAA compliance enforcement efforts. The\nHIPAA Audit Protocol currently exists as a table on the HHS website and is most commonly turned\ninto a spreadsheet. But there are limitations and inefficiencies to working with an audit\nprotocol in a spreadsheet, as compared to GitLab:\n\n* Collaboration is challenging in a spreadsheet. With issues, robust and well-organized conversations are easy.\n* You can't upload audit protocol files directly to the spreadsheet. With issues, you can upload files directly.\n* There's no simple, clean way to maintain a full, comprehensive change history in a\nspreadsheet. With issues and merge requests, change history and logging is directly built in.\n* It can be challenging to manage due dates, milestones, and work assignments in a\nspreadsheet. With issues, those are all native features.\n\n## Introducing the HIPAA Audit Protocol Issue Generator\n\nThe HIPAA Audit Protocol Issue Generator is a simple Python script using the\n`python-gitlab` API wrapper to create issues out of every audit inquiry in the protocol.\nThe purpose of the script is to highlight how easy it is to use the GitLab API to move\nworkflows inside of GitLab and leverage GitLab's project management capabilities.\n\nThe best part? Creating this tool was really easy and simple. I started by copying the\nHTML table of the audit protocol into a CSV. 
Then I wrote a simple loop to go through each\nrow in the CSV and automatically create an associated issue. Note: While you can use the\nissue importer, you’d have to create a very strictly structured and formatted import file.\nWith the GitLab API, you have more flexibility and the output is easier to work with.\n\nFor your use, we’ve made\nthe [full script and a CSV of the audit protocol](https://gitlab.com/ltrbojevic/hipaa-audit-protocol-issue-generator)\navailable.\n\nHere’s what an issue looks like:\n\n![sample issue](https://about.gitlab.com/images/blogimages/HIPAA-audit-protocol-example.png){: .shadow.medium.center}\n\nAll the information you need is front and center and the issues are labeled.\n{: .note.text-center}\n\n### Customizing the issue structure\n\nFirst, it’s important to understand how we load the data from the CSV. There are different\nways to do it, but I like to assign every column in the CSV to a variable, then pass that\nvariable to the issue create API call. For this script, we have:\n\n```\naudit_type = col[0]\nsection = col[1]\nkey_activity = col[2]\nestablished_performance_criteria = col[3]\naudit_inquiry = col[4]\nrequired_addressable = col[5]\n```\n\nThat means `audit_type` is the first column, `section` is the second column, and so on.\n\nThese variables then get used in the issue create API call. For this script, we have:\n\n```\nissue = project.issues.create({'title': key_activity,\n'description': '## Established Performance Criteria' + '\\n' +\nestablished_performance_criteria + '\\n' + '## Audit Inquiry' +\n'\\n' + audit_inquiry,\n'labels': [audit_type] + [required_addressable] + [section]})\n```\n\n#### Title\n\nI decided to use the respective Key Activity of every audit inquiry. In the issue create API\ncall, it looks like this:\n\n` ‘title’: key_activity, `\n\nYou can make the title anything you want. 
In this case I just used a column from the CSV, but I\ncould start or end the title with some other text not in the CSV, like:\n\n` ‘title’: Any text you want:’ + ‘ ‘ key_activity, `\n\n#### Description\n\nI just used a string to manually write out the headers for the section and filled the sections\nusing the data from the CSV. In the issue create API call, it looks like this:\n\n` 'description': '## Established Performance Criteria' + '\\n' + established_performance_criteria + '\\n' + '## Audit Inquiry' + '\\n' + audit_inquiry, `\n\nNotice how I use the newline. Without the newline, all of the data would be added to the description\nright next to each other and it would be unreadable. You can add as many newlines as you want,\nand if you’re planning on doing regular editing of the issue description itself, consider\nadding two newlines to create a new paragraph so the issue description is more readable in edit mode.\n\n#### Labels\n\nLabels are very helpful for organizing, searching, filtering, and creating boards. For the labels,\nI opted to use Audit Type and Section. In the issue create API call, it looks like this:\n\n` 'labels': [audit_type] + [required_addressable] + [section] `\n\nYou can also add your own labels to the CSV by creating a new column and adding the labels you\nwant for every given row, or you can add a static label applied to all the issues by adding it\nto the API call. Make sure to keep the variables in brackets or the string will split\n(for example, instead of `Privacy` it will create a label for each letter in the word `Privacy`).\n\n#### Adding other sections\n\nYou can customize the script to add any other sections allowable by\nthe [GitLab API](https://docs.gitlab.com/ee/api/). Because we’re working with issues,\nthe [GitLab Issues API](https://docs.gitlab.com/ee/api/issues.html) documentation will be\nhelpful. 
The [`python-gitlab` documentation](https://python-gitlab.readthedocs.io/en/stable/index.html) is\nalso a great resource, given that this script makes use of it.\n\n## Making your own workflows in GitLab\n\nWhile in this blog post I've focused on the HIPAA Audit Protocol and the issue generator\nscript, it is also a practical, hands-on example of how simple it is to use the\nGitLab API to move any workflow to GitLab. There are two primary components:\n\n1. A data source (I prefer CSV files)\n2. A crafted API call to use the data source to bring the data into GitLab\n\nI think of the data source as the thing I want in GitLab and the crafted API call as the\nvehicle to get it into GitLab. Think of a row in your CSV as an issue and the columns as the\nthings you’re putting in the issue.\n\nWhile my specific example was the audit protocol, we can use this strategy for just about anything.\nSome examples are risk assessments, gap analyses, event planning, product launches, and more.\n\nTo adapt this script for other workflows:\n1. Start by getting your data into a CSV. Be sure to remove your headers before running the script\nbecause the script doesn’t account for column headers as is!\n2. Modify the variables and issue create API call we talked about in\nthe [Customizing the issue structure section above](#customizing-the-issue-structure) to match with your CSV and data.\n\nAnd that’s really it!\n\nAt GitLab, we use the\n[simplest and most boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions)\nto solve problems. With this approach, we were able to turn an HTML table of the HIPAA Audit Protocol\non the HHS website into a functional workflow within GitLab in just a few\nhours (including API research time and CSV formatting) and in 42 lines of code (including comments).\nTo add to that, the script can be repurposed for just about any other workflow. 
Plus, the script is\navailable for anyone to download, use, and modify in any way, and this blog post can serve as a\nguide on how to do that.\n\nAs a long-term solution to bring this functionality into\nGitLab as a feature, there’s also an\n[open issue to collect feedback on creating\na marketplace for issue templates](https://gitlab.com/gitlab-org/gitlab-ce/issues/62895). If you have any suggestions or comments about the marketplace\nidea, feel free to post them in the issue!\n\nHave a specific question you want answered or want to get feedback on a specific use case? Comment below!\n\n### Disclaimer\nTHE INFORMATION PROVIDED ON THIS WEBSITE IS TO BE USED FOR INFORMATIONAL PURPOSES ONLY. THE\nINFORMATION SHOULD NOT BE RELIED UPON OR CONSTRUED AS LEGAL OR COMPLIANCE ADVICE OR OPINIONS.\nTHE INFORMATION IS NOT COMPREHENSIVE AND WILL NOT GUARANTEE COMPLIANCE WITH ANY REGULATION OR\nINDUSTRY STANDARD. YOU MUST NOT RELY ON THE INFORMATION FOUND ON THIS WEBSITE AS AN\nALTERNATIVE TO SEEKING PROFESSIONAL ADVICE FROM YOUR ATTORNEY AND/OR COMPLIANCE PROFESSIONAL.\n{: .note}\n\nPhoto by [Pixabay](https://www.pexels.com/photo/bright-cardiac-cardiology-care-433267/) on [Pexels](https://www.pexels.com)\n{: .note}\n",[9,745,720],{"slug":4958,"featured":6,"template":680},"moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol","content:en-us:blog:moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol.yml","Moving Workflows To Gitlab The Case Of The Hipaa Audit 
Protocol","en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol.yml","en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol",{"_path":4964,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4965,"content":4971,"config":4976,"_id":4978,"_type":14,"title":4979,"_source":16,"_file":4980,"_stem":4981,"_extension":19},"/en-us/blog/mvcs-with-big-results",{"title":4966,"description":4967,"ogTitle":4966,"ogDescription":4967,"noIndex":6,"ogImage":4968,"ogUrl":4969,"ogSiteName":667,"ogType":668,"canonicalUrls":4969,"schema":4970},"4 Examples of MVCs with big results","Small change, big impact. Here are four recent tweaks to GitLab which exemplify our value of iteration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678764/Blog/Hero%20Images/mvcs-big-results.jpg","https://about.gitlab.com/blog/mvcs-with-big-results","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Examples of MVCs with big results\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2018-09-07\",\n      }",{"title":4966,"description":4967,"authors":4972,"heroImage":4968,"date":4973,"body":4974,"category":299,"tags":4975},[2035],"2018-09-07","\nIteration is [one of our values](https://handbook.gitlab.com/handbook/values/#iteration), and it's often the hardest to stick to. It’s difficult to determine the smallest feature or update that will still bring additional value to users. The benefit is that we can ship quickly and get feedback from GitLab users within days or weeks, instead of months or quarters.\n\nAt GitLab we practice iteration by shipping Minimally Viable Changes (MVCs). This can be a new feature scoped to a small functionality, or incremental improvements on it thereafter. 
Read more about MVC in our [Product handbook](/handbook/product/product-principles/#the-minimal-viable-change-mvc).\n\nDespite being small, these new features often nonetheless have a big impact. Here are some of our recent MVCs that did just that:\n\n## 1. Function: Assignee lists and milestone lists\n\nIntroduced in 11.1, [issue board assignee lists](/releases/2018/06/22/gitlab-11-0-released/#issue-board-assignee-lists) offer a way to monitor team bandwidth right within your issue board, by showing all issues assigned to a specific user. See [4 ways to use GitLab Issue Boards](/blog/4-ways-to-use-gitlab-issue-boards/#3-team-visibility-with-assignee-lists) for more details, and [check out the documentation for assignee lists here](https://docs.gitlab.com/ee/user/project/issue_board.html#assignee-lists).\n\nIn 11.2, we added [milestone lists](/releases/2018/08/22/gitlab-11-2-released/#issue-board-milestone-lists) to allow you to view all issues assigned to a specific milestone. With this visibility, you can move issues across different milestones easily to balance [issue weight](/releases/2018/08/22/gitlab-11-2-released/#summed-weights-in-issue-board-list). View [the documentation for milestone lists here](https://docs.gitlab.com/ee/user/project/issue_board.html#milestone-lists).\n\n## 2. Design: Merge request widget info and pipeline sections redesign\n\nSometimes it's not new functionality that makes a big difference, but just changing how you view it. In 11.1, we [tweaked the design of the information and pipeline sections](/releases/2018/07/22/gitlab-11-1-released/#merge-request-widget-info-and-pipeline-sections-redesign) in a [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/), making them easier to digest.\n\n![Merge request redesign](https://about.gitlab.com/images/11_1/mr-widget-info-pipeline.png){: .shadow.medium.center}\n\n## 3. 
Navigation: Groups dropdown\n\nAlso in 11.1, we made it easier to switch between groups and avoid disruption to your workflow by adding a [dropdown to the groups link in the top navigation](/releases/2018/07/22/gitlab-11-1-released/#groups-dropdown-in-navigation). There's no need to navigate away from your work, and your frequently visited groups are handily displayed for quick access.\n\n## 4. Shortcut: Confidential issue quick action\n\n[Quick actions](https://docs.gitlab.com/ee/user/project/quick_actions.html) make your GitLab life easier and are easy to contribute! As of 11.1 you can quickly and easily [mark an issue confidential right from the comment field](/releases/2018/07/22/gitlab-11-1-released/#confidential-issue-quick-action), thanks to a community contribution.\n\nInspired to contribute an MVC yourself? Find out [how to start contributing to GitLab](/community/contribute/). You can also check out some more [MVCs coming up in 11.3](/blog/epics-roadmap/).\n\nPhoto by [Ravali Yan](https://unsplash.com/photos/fleZeABaSWY?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/upwards?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[677,9,745],{"slug":4977,"featured":6,"template":680},"mvcs-with-big-results","content:en-us:blog:mvcs-with-big-results.yml","Mvcs With Big Results","en-us/blog/mvcs-with-big-results.yml","en-us/blog/mvcs-with-big-results",{"_path":4983,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4984,"content":4990,"config":4996,"_id":4998,"_type":14,"title":4999,"_source":16,"_file":5000,"_stem":5001,"_extension":19},"/en-us/blog/navigation-research-blog-post",{"title":4985,"description":4986,"ogTitle":4985,"ogDescription":4986,"noIndex":6,"ogImage":4987,"ogUrl":4988,"ogSiteName":667,"ogType":668,"canonicalUrls":4988,"schema":4989},"How we overhauled GitLab navigation","Users weren't getting what they needed from our navigation. 
Here are the steps we took to turn that experience around.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682884/Blog/Hero%20Images/navigation.jpg","https://about.gitlab.com/blog/navigation-research-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we overhauled GitLab navigation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ashley Knobloch\"}],\n        \"datePublished\": \"2023-08-15\",\n      }",{"title":4985,"description":4986,"authors":4991,"heroImage":4987,"date":4993,"body":4994,"category":787,"tags":4995},[4992],"Ashley Knobloch","2023-08-15","\nGitLab navigation was complex and confusing - that was the message we received from our users through issues and other feedback channels. Initially, to address these concerns, we conducted research around proposed solutions, but quickly found they wouldn't help users achieve their goals well enough to warrant implementing them. In the process of learning what wasn't working and what wouldn't work, we still didn't have clarity around *why* the navigation wasn't working. This article chronicles our journey to finding that clarity and developing navigation that is easier to use and better suited to our users' needs.\n\n## Our approach\nAs a first step, we reviewed past research and user feedback to ensure we had a solid understanding of what we had done and learned already. We found that we still needed more insight into why proposed changes weren’t receiving enough positive feedback to implement them.\n\nOur goals were straightforward:\n- understand what users are doing in GitLab\n- study how they navigate the platform\n- learn why they need certain navigation elements \n\nOur perspective shifted from validating proposed solutions to going back to revalidate the problems that exist with our navigation experience. 
Our hypothesis was that with a deeper understanding of our users’ behavior and mental models for how they navigate around GitLab, we could develop concepts to better match their needs and improve their overall experience.  \n\nThe scope of features in GitLab and the number of user personas across GitLab made this challenging. We have [16 personas](https://about.gitlab.com/handbook/product/personas/#user-personas) to represent different types of users, all with unique goals and techniques to achieve those goals. We focused our efforts on a subset of those personas that best represented usage across GitLab to ensure a holistic understanding of different user needs. We wanted to learn how navigation among different personas was similar and where it differed, what worked well with the current navigation, and what challenges users faced.\n\n## Studying key persona cohorts\nWe conducted [diary studies](https://about.gitlab.com/handbook/product/ux/ux-research/diary-studies/) with cohorts of our key personas to learn what their primary tasks and workflows were at a deeper level. This provided us with many real-world examples of how they navigate to their tasks and why. We also learned what worked well with their current workflows, what pain points existed, and what workarounds were being used (such as creating browser bookmarks, typing in the URL to pull browser history, or keeping a bunch of browser tabs open) to streamline their tasks in GitLab. \n\nWe learned that for some users, many of their primary tasks don’t require much navigation within GitLab because they use outside tools that link into GitLab through notifications (e.g., Slack and email) or use direct links through other tools. We also learned that often users’ work is quite scoped in GitLab, and they would like easier access to some of their core features without having to wade through all of the other features they don’t use. 
This illuminated some unmet needs that would improve their workflows, such as having the ability to customize navigation to access things important to them more quickly and streamline their path to relevant projects.\n\nLearning more about our users from a foundational perspective ensured that we had a solid base to build upon when considering changes to the navigation.\n\n## Anchoring to a North Star\nTo anchor the redesign process in user problems more broadly, a review of past feedback was analyzed that revealed three overarching themes with navigation-related feedback. These themes helped to guide the process and to remind us of the key problems we were trying to solve: \n- minimize feeling overwhelmed (ability to customize left sidebar)\n- orient users across the platform (differentiating groups and projects)\n- pick up where you left off (switching contexts)\n\nThe team continually mapped back design concepts to these themes to ensure potential solutions were rooted in user problems. \n\n## Evaluating and iterating\nNext, several navigation design concepts were developed and shared with users for feedback. Multiple rounds of [solution validation testing](https://about.gitlab.com/handbook/product/ux/ux-research/solution-validation-and-methods/) were conducted with our key personas to determine which design concepts to move forward with. The testing revealed how users felt about each design and also how well each design supported users completing core tasks. We identified a final concept that supported mature and new GitLab users with common workflows.\n\n## Understanding mental models for sidebar organization\nWe wanted to revisit our groupings in the left sidebar because we’ve heard over time that the organization can be confusing and unintuitive, especially some categories such as Operations. We needed to understand our users’ mental models for how they would group these items, and why. 
Learning the thought processes behind their organization was critical for us to know what changes to make that would align with user expectations. \n\nWe ran facilitated [card sort](https://about.gitlab.com/handbook/product/ux/ux-research/mental-modeling/#card-sorting) studies with our key personas to understand how they would group items in the left sidebar, and why. This helped us learn some areas that could benefit from readjusting, such as the Manage and Operate categories. We learned that users most often preferred to have analytics items together, for example, which is reflected in the Analyze tab. This insight, combined with patterns in analytics data, informed changes to the groupings in the left sidebar to better support workflows. \n\n## Launching and learning\nPrior to launching to external users, the new navigation was released to internal team members and we collected [feedback](https://gitlab.com/gitlab-org/gitlab/-/issues/403059) to help iterate and improve the experience. \n\nNext, we launched the new navigation to external users as a toggle that could be turned on optionally. During this initial launch, a [longitudinal study](https://about.gitlab.com/handbook/product/ux/ux-research/longitudinal-studies/) was conducted with a sample of GitLab users to learn how they experienced the change in the context of their real work. Over time, the study would provide insight into adoption among the entire user base.  \n\nWe interviewed users prior to the monthlong study to learn more about their experience with the existing navigation. Then, they began using the new navigation while completing surveys and participating in interviews at checkpoints in the beginning, middle, and end of the month. This enabled us to capture their initial impressions of the new navigation, what they liked/disliked, how the new experience compared to the previous one, and if their sentiment changed over the course of the month as they continued to use the new navigation. 
\n\nUsers in this study found the new navigation to be an improvement from the previous one, and most preferred its features, including:\n- the ability to pin items streamlined common workflows\n- the new task-based sidebar categories in the sidebar, which they said felt more approachable, especially for newer users\n- the new navigation changes, which they said weren’t too overwhelming and felt familiar\n\nWe also learned about some opportunities to iterate and improve the new experience. For instance, some users pointed out:\n- the inability to pin entire Projects, Groups, or specific pages makes it difficult to streamline other workflows\n- some users unpin items accidentally\n- the overall lack of color can cause some features to blend in or be missed\n- it's not always easy to know what’s new in GitLab  \n\n## What’s next: Iterate, listen, and iterate again\nTo capture large-scale feedback on navigation over time, we launched a new navigation-focused quarterly survey in Q1 (February) of this year. This first quarter data established a baseline of our old navigation, and beginning in Q2 (May), we began collecting data on the new navigation experience. We will monitor this closely, and look for themes to help us learn what is working well and what may need further iteration. \n\nThis survey, along with our longitudinal study feedback and various other user feedback sources, will provide insights to help prioritize iterative improvements to the new navigation experience. 
Stay tuned for changes, and keep sharing [your navigation feedback](https://gitlab.com/gitlab-org/gitlab/-/issues/409005) with us!\n",[9,700,3117],{"slug":4997,"featured":6,"template":680},"navigation-research-blog-post","content:en-us:blog:navigation-research-blog-post.yml","Navigation Research Blog Post","en-us/blog/navigation-research-blog-post.yml","en-us/blog/navigation-research-blog-post",{"_path":5003,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5004,"content":5010,"config":5015,"_id":5017,"_type":14,"title":5018,"_source":16,"_file":5019,"_stem":5020,"_extension":19},"/en-us/blog/navigation-state-of-play",{"title":5005,"description":5006,"ogTitle":5005,"ogDescription":5006,"noIndex":6,"ogImage":5007,"ogUrl":5008,"ogSiteName":667,"ogType":668,"canonicalUrls":5008,"schema":5009},"Explore the past, present, and future of GitLab's Navigation design","Dive into the history of GitLab's navigation design and learn how GitLab's UX department is making incremental improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678236/Blog/Hero%20Images/navigation.jpg","https://about.gitlab.com/blog/navigation-state-of-play","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Explore the past, present, and future of GitLab's Navigation design\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Katherine Okpara\"}],\n        \"datePublished\": \"2019-07-31\",\n      }",{"title":5005,"description":5006,"authors":5011,"heroImage":5007,"date":5012,"body":5013,"category":787,"tags":5014},[4295],"2019-07-31","\nAs a UX department, we are responsible for creating navigational structures that are intuitive,\nin tune with user needs, and representative of the numerous workflows of our community of users.\nHowever, when designing for the needs of so many different people, we often have to make compromises\nand not everyone is pleased with the result. 
Navigation is not just about getting from point A to\nB; it can shape workflows, empower users to discover new, more efficient ways of working, and\nultimately determine how comfortable users are with a product. From the moment users log in for\nthe first time to when they start diving deeper into GitLab’s diverse feature set, our navigation\nstructure is critical for shaping the user's path and, ultimately, their success in using GitLab.\n\n### Why does this matter?\nOur UX Research team is always concerned with investigating and advocating for the needs of all\nGitLab users. We have a [history of research](https://gitlab.com/gitlab-org/uxr_insights/blob/master/Navigation-Research-Summary.md)\nthat has resulted in incremental improvements to GitLab’s navigation over time. After gathering\nfeedback from many sources over the years, we are excited to lead a strategic, dedicated\ninitiative to improve GitLab’s navigation. As part of this initiative, we will consider the\ngoals and frustrations of all users and assess the experiences shaped by the most common workflows\nthroughout GitLab. We will continue to gather feedback from our product users, customers, and\ninternal stakeholders as a way to identify key opportunities for improvement.\n\n### History of GitLab's navigation\nBefore we outline our future research and design plans, let’s take a look back and understand GitLab’s\nnavigation design journey.\n\n![Original design](https://i.imgur.com/9oZq3de.png){: .medium.center}\n\nGitLab's original design\n{: .note.text-center}\n\nThere are two ways to navigate throughout GitLab: globally and contextually. 
Global navigation refers\nto elements that are always available (e.g., browsing between groups and projects using the top navigation bar).\nContextual navigation refers to the elements that change based on the page a user is viewing.\nBalancing these levels of navigation has consistently been one of the top challenges in each\nphase of GitLab’s navigation design.\n\nIn a [June 2016 blog post describing the pain points that led to\nGitLab’s first navigation redesign](/blog/navigation-redesign/), [Dmitriy Zaporozhets](/company/team/#dzaporozhets),\nGitLab’s co-founder and engineering fellow, stated the following as reasons why GitLab’s UI did not\nwork very well:\n\n- *The current navigation is not well organized. There are places where it does not follow logic or best practices.*\n- *We cannot use muscle memory with the collapsed menu sidebar for fast click on links because the menu has too many items, with new ones added every once in a while.*\n- *It's hard to navigate when you come to GitLab via a link from another app (like chat, for example) because of the lack of a logical hierarchy in our UI navigation.*\n\nTo address these pain points, Dmitriy worked with GitLab’s UX designer to iterate through proposed\nchanges. They landed on a navigation design that introduced a dark-colored, collapsible left\nsidebar to house global navigation elements, along with a contextual top navigation that changed\nrelative to pages the user visited in GitLab.\n\n| Group-level navigation | Project-level navigation|\n|------------------------|-------------------------|\n|![Group level](https://i.imgur.com/HD7ElxQ.png){: .shadow}| ![Project level](https://i.imgur.com/w04Zq6D.png){: .shadow} |\n\nAfter this redesign, the team continued to iterate and make incremental changes to the navigation.\nThese changes became more significant when the option to “pin” (to the screen) or “unpin” the left\nsidebar was introduced. 
“Pinning” would keep the sidebar static while “unpinning” would remove\nit from the screen and place it under a hamburger menu icon. There were more unfavorable reactions\nto the changes after [GitLab’s 9.0 release](/releases/2017/03/22/gitlab-9-0-released/#updated-navigation-ce-ees-eep),\nwhen the [left sidebar was converted into a dropdown](https://gitlab.com/gitlab-org/gitlab-ce/issues/26200)\nfor all users, permanently placed in a hamburger menu at the far left of the top navigation bar.\n\n| \"Pinned\" | \"Unpinned\" |\n|----------|------------|\n| ![Pinned](https://i.imgur.com/IMYn45r.png){: .shadow} | ![Unpinned](https://i.imgur.com/Jag1HeG.png){: .shadow} |\n\nAfter receiving this feedback, our UX team conducted additional rounds of usability testing and [in\n2017 we made significant improvements to the navigation](/blog/redesigning-gitlabs-navigation/).\nThe decision to reorganize the structure of global and contextual content was one of the more prominent changes.\nGlobal navigation elements would now exist in the top navigation while contextual navigation elements\nwould exist in the left sidebar. These changes were first implemented behind a feature flag, to give\nusers a chance to try out the new flow and tell us how they felt about it. We created\na [feedback issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/34917) so users could discuss\ntheir experiences and share their likes and dislikes in an open, collaborative space.\n\nThe feedback issue led to additional improvements and highlighted more opportunities to optimize\nGitLab’s navigation. 
Our design team used this feedback to iterate for two release cycles and\nidentify changes that would bring the most benefit, such as\n[flyout menus in the left sidebar](https://gitlab.com/gitlab-org/gitlab-ce/issues/34026)\nand [improvements to breadcrumbs](https://gitlab.com/gitlab-org/gitlab-ce/issues/35269).\nIn September 2017, [GitLab’s navigation redesign became official](/blog/unveiling-gitlabs-new-navigation/)\nand turned on for all users.\n\n![2017 redesign](https://i.imgur.com/ovRRBwE.png){: .shadow.medium.center}\n\nOur 2017 redesign\n{: .note.text-center}\n\nGitLab’s navigation design has not drastically changed since the 2017 redesign, aside\nfrom incremental changes made when adding new feature links to the left sidebar and the\nintroduction of instance-wide workflows. However, even with all of these notable improvements,\nsome users are still confused by finding their way around GitLab, especially when interacting\nwith the left sidebar. Many users have unique workflows based on the features they use, companies\nthey work with, and the amount of time they’ve been using GitLab. As a result, even design decisions\nthat are informed and supported by research can receive negative feedback from those who are\nimpacted by the changes.\n\nEven in 2019, our users describe usability issues that reflect the pain points described in our\nfirst navigation redesign blog post. Presently, a large portion of the pain points can be attributed to\nGitLab’s rapid growth and increased focus on\n[shipping features for the entire DevOps lifecycle](/stages-devops-lifecycle/). As the product\ncontinues to grow, users who only interact with specific features can become overwhelmed by all\nof the information and paths available in the interface. 
In order to avoid a future pattern\nof frequent changes to the navigation structure, we need to create a systematic approach for\naddressing the diverse use cases that come along with a rapidly growing product.\n\n### What we've learned\n\nThe outcome of all of the research we have conducted over the years is an understanding of the\ncore pain points and usability issues users face when navigating throughout GitLab. I believe that\nthe main themes of our research initiative should be **context** and **discoverability**.\n\n- **Context:** How can we help users maintain context and stay oriented while switching between levels\nof navigation and features in different product stages?\n\n- **Discoverability:** How can navigation be a method of promotion and discovery for new features\nwhile still preserving the findability of commonly used features?\n\nThese two themes are important for creating a systematic approach to organizing content in GitLab's UI.\nWe've had [internal discussions](https://gitlab.com/gitlab-org/ux-research/issues/108) around aligning\nGitLab's UI with [our DevOps stages](/handbook/product/categories/#devops-stages) to categorize\ncontent in a way that reflects the evolution of our product and organization. However, the findings\nfrom [a series of research studies](https://gitlab.com/groups/gitlab-org/-/epics/1236) cautioned us\nagainst moving in that direction, to prevent a negative impact on findability and confusion in users\nwho are not familiar with GitLab's DevOps stages.\n\nWhile it may be possible to teach users about the DevOps stages over time, the feedback from this\nresearch showed us that the additional layers of sub-navigation could make navigation a more\ncumbersome experience. Additionally, some of the names of the DevOps stages are broad and not\nimmediately descriptive (e.g., “Manage” and “Create”). This may require users to do more guesswork\nto understand the variety of features that could fall under each stage. 
Our upcoming research\ninitiative provides us with the opportunity to explore how we can build an intuitive, logical\nsystem for categorizing new features and guiding users through tasks that cross multiple product stages.\n\nTo read more about the key findings from our prior navigation research, please visit\nthe [UX research insights repository](https://gitlab.com/groups/gitlab-org/-/epics/1555) for a summary of\nour research.\n\n## What comes next?\nWe will investigate the paths that users take throughout GitLab and consider how we should balance\nthe needs of users who have contrasting team sizes, roles, and product tiers. Our goal is to find ways\nto align with the principle of [convention over configuration](/handbook/product/product-principles/#convention-over-configuration)\nwhile still addressing the diverse needs of our users. Please see\nthe [navigation research initiative epic](https://gitlab.com/groups/gitlab-org/-/epics/1342) for more information.\n\nThis research initiative will be conducted in the following phases:\n\n1. [Stakeholder interviews](https://gitlab.com/gitlab-org/ux-research/issues/211): Understand what\ninternal stakeholders need and expect from the flow of GitLab's navigation, feature discoverability,\nand usability.\n2. [User interviews](https://gitlab.com/gitlab-org/ux-research/issues/236): Gather insight from GitLab\ncustomers about their experiences navigating throughout GitLab, learning how to use the product, and\ndiscovering new features. Focus on use cases that cross\nmultiple DevOps stages.\n3. [Explore and assess key user journeys](https://gitlab.com/gitlab-org/ux-research/issues/221): Work with\nGitLab product designers to document the common paths and tasks\nour [user personas](/handbook/product/personas/#user-personas) complete,\nhighlighting usability issues and ranking them by severity.\n4. 
[Share UX research recommendations](https://gitlab.com/gitlab-org/ux-research/issues/222): Recommended\nchanges based on information architecture best practices and feedback from users and\nstakeholders. Share results with Product and UX teams, discuss solutions, and outline next steps.\n\n### We need your help!\nIf you could wave a magic wand, what would be your ideal vision for GitLab’s navigation?\n\nPlease share your top pain points, suggestions for improvement, or things you like about GitLab's\nnavigation design in the comments below!\n",[9,700,1698],{"slug":5016,"featured":6,"template":680},"navigation-state-of-play","content:en-us:blog:navigation-state-of-play.yml","Navigation State Of Play","en-us/blog/navigation-state-of-play.yml","en-us/blog/navigation-state-of-play",{"_path":5022,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5023,"content":5028,"config":5033,"_id":5035,"_type":14,"title":5036,"_source":16,"_file":5037,"_stem":5038,"_extension":19},"/en-us/blog/new-default-container-image-gitlab-saas-linux-runnners",{"title":5024,"description":5025,"ogTitle":5024,"ogDescription":5025,"noIndex":6,"ogImage":2449,"ogUrl":5026,"ogSiteName":667,"ogType":668,"canonicalUrls":5026,"schema":5027},"How to use Ruby 3.1 as the default container image on GitLab SaaS Runners on Linux","Learn about the new image and how to ensure CI job compatibility.","https://about.gitlab.com/blog/new-default-container-image-gitlab-saas-linux-runnners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Ruby 3.1 as the default container image on GitLab SaaS Runners on Linux\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2022-12-13\",\n      }",{"title":5024,"description":5025,"authors":5029,"heroImage":2449,"date":5030,"body":5031,"category":743,"tags":5032},[2454],"2022-12-13","\nOn January 12, 2023, we will change the 
[default container](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html) image used on GitLab SaaS Runners on Linux from Ruby 2.5, which is end of life, to Ruby 3.1.\n\nIf you have specified a container image in your CI/CD job, then there is no impact to you. In other words, your GitLab SaaS CI/CD job will only run in the default container if no image is set for the job in the `.gitlab-ci.yml` pipeline file.\n\nTo check, open the log view of a CI job and note the image used. For example, if you have not added an image to your CI job on GitLab SaaS, then the job log will have the following:\n\n```\nUsing Docker executor with image ruby:2.5 ...\n\n```\n\nIf you have not set a container image in your CI job, then after this change, the job will run in a Ruby 3.1 container.\n\n## How can I check for any build issues on Ruby 3.1?\n\nWhile it is not expected that running a CI/CD job on Ruby 2.5 is incompatible with Ruby 3.1, to check, simply configure the job to run in a Ruby 3.1 container. To do so, edit the `.gitlab-ci.yml` and add the following:\n\n```\ndefault:\n  image: ruby:3.1\n```\n\n## Future plans\n\nIn addition to this change, we plan to [define](https://gitlab.com/gitlab-org/gitlab/-/issues/384992) a new container image maintenance process for GitLab SaaS Runners on Linux. The new policy aims to ensure that the default image used is updated so that it contains the latest security fixes.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\n",[1440,9,1090,1293,231],{"slug":5034,"featured":6,"template":680},"new-default-container-image-gitlab-saas-linux-runnners","content:en-us:blog:new-default-container-image-gitlab-saas-linux-runnners.yml","New Default Container Image Gitlab Saas Linux Runnners","en-us/blog/new-default-container-image-gitlab-saas-linux-runnners.yml","en-us/blog/new-default-container-image-gitlab-saas-linux-runnners",{"_path":5040,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5041,"content":5047,"config":5052,"_id":5054,"_type":14,"title":5055,"_source":16,"_file":5056,"_stem":5057,"_extension":19},"/en-us/blog/new-gitlab-com-terms-of-service",{"title":5042,"description":5043,"ogTitle":5042,"ogDescription":5043,"noIndex":6,"ogImage":5044,"ogUrl":5045,"ogSiteName":667,"ogType":668,"canonicalUrls":5045,"schema":5046},"New GitLab.com Terms of Service: Coming soon","We're updating GitLab.com Terms of Service to comply with upcoming GDPR regulations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671239/Blog/Hero%20Images/contract-document-documents-48148.jpg","https://about.gitlab.com/blog/new-gitlab-com-terms-of-service","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New GitLab.com Terms of Service: Coming soon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2018-05-01\",\n      }",{"title":5042,"description":5043,"authors":5048,"heroImage":5044,"date":5049,"body":5050,"category":299,"tags":5051},[2725],"2018-05-01","\n\n## Why the change?\n\nMany of you are aware of the pending implementation of the [General Data Protection Regulation (GDPR)](/privacy/privacy-compliance/), which has specific requirements for how personal data is protected and managed. 
As a result of these new requirements, we will be updating our Terms of Service (TOS) and will also need to ensure that GitLab.com users are aware of the change and agree to these new terms.\n\nIn the past, we’ve been able to offer a more passive approach to accepting TOS, but going forward the new process will **require a distinct step from users to agree to them**. As a result of this change, you will be asked to review and agree to the updated TOS.\n\n**Key point:** When the new TOS and acceptance requirement goes live, **you will be unable to access** GitLab.com until you have accepted the new TOS.\n{: .alert .alert-gitlab-orange}\n\n## What do I need to do and when?\n\nBecause many of you access GitLab.com through API and Git interactions, we're planning on a two-phase implementation. The first phase will focus on users who access GitLab.com from the web. Soon, when you visit GitLab.com from the web, you will be presented with a new TOS to accept.\n\nNote, at this point, API access to and Git interactions with GitLab.com will _not yet_ be affected. If you use GitLab.com via any automated API or Git process, **please log into GitLab.com as those API/Git users and navigate to [https://gitlab.com/-/users/terms](https://gitlab.com/-/users/terms) to accept the terms.** (Note: this page will be active shortly)\n\n### May 23, 2018\n\nOn May 23, 2018, the new TOS requirement will be enforced for all traffic. At this point, all web traffic, API access, and Git interactions will be blocked for any GitLab.com user that has not accepted the new TOS. If you use GitLab.com via any automated API or Git process, access will stop working on May 23, 2018 if you have not accepted the new TOS. **Remember, if you accept the TOS by May 23 for your users, you will not experience any disruption.**\n\nWe are committed to protecting your data and your privacy and being transparent in our processes and approach. 
If you have any questions or concerns, please leave us a comment here.\n\n[Cover image](https://www.pexels.com/photo/sign-pen-business-document-48148/) licensed\nunder [CC X](https://www.pexels.com/photo-license/)\n{: .note}\n",[9,675],{"slug":5053,"featured":6,"template":680},"new-gitlab-com-terms-of-service","content:en-us:blog:new-gitlab-com-terms-of-service.yml","New Gitlab Com Terms Of Service","en-us/blog/new-gitlab-com-terms-of-service.yml","en-us/blog/new-gitlab-com-terms-of-service",{"_path":5059,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5060,"content":5065,"config":5070,"_id":5072,"_type":14,"title":5073,"_source":16,"_file":5074,"_stem":5075,"_extension":19},"/en-us/blog/next-generation-container-registry",{"title":5061,"description":2468,"ogTitle":5061,"ogDescription":2468,"noIndex":6,"ogImage":5062,"ogUrl":5063,"ogSiteName":667,"ogType":668,"canonicalUrls":5063,"schema":5064},"Introducing the next generation of the GitLab.com Container Registry","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663383/Blog/Hero%20Images/tanuki-bg-full.png","https://about.gitlab.com/blog/next-generation-container-registry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the next generation of the GitLab.com Container Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2022-04-12\",\n      }",{"title":5061,"description":2468,"authors":5066,"heroImage":5062,"date":5067,"body":5068,"category":675,"tags":5069},[2473],"2022-04-12","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\nIn the coming weeks, we will begin the second phase of the rollout of the new version of the Container Registry on GitLab.com. Prior to deploying this update, we wanted to clearly communicate the planned changes, what to expect, and why we are excited.\n\nIf you have any questions or concerns, please don't hesitate to comment in the [epic](https://gitlab.com/groups/gitlab-org/-/epics/5523).\n\n## Context \n\nIn [Milestone 8.8](/releases/2016/05/22/gitlab-8-8-released/), GitLab launched the MVC of the Container Registry. This feature integrated the Docker Distribution registry into GitLab so that any GitLab user could have a space to publish and share container images.\n\nBut there was an inherent limitation with Docker Distribution, as all metadata associated with a given image/tag was stored in the object storage backend. This made using that metadata to build API features (like storage usage visibility, sorting, and filtering) unfeasible. The most recent Container Registry update added a new PostgreSQL backend, which is used to store the metadata. Additionally, this new version also includes an automatic online garbage collector to remove untagged images and recover storage space.\n\nIn November 2021, we started [phase 1](/blog/gitlab-com-container-registry-update/) of the migration. This completed in January 2022 without any significant issues. Since then, every new image repository pushed to GitLab.com uses the new, metadata database-backed registry. Today, nearly 20% of Container Registry traffic is already routed to the new version.\n\nNow we are ready to begin [Phase 2 of the migration](https://gitlab.com/gitlab-org/container-registry/-/issues/374#phase-2-migrate-existing-repositories). 
This will migrate image repositories created before January 22, 2022, to the new Container Registry. Once complete, we can unblock many of the features that you've been asking for.\n\n## Why we are excited \n\n- [Storage visibility for the Container Registry](https://gitlab.com/groups/gitlab-org/-/epics/7225)\n\n- Performance improvements for list operations when using the GitLab API and UI\n\n- [Redesign of the UI](https://gitlab.com/groups/gitlab-org/-/epics/3211)\n  - [Build and commit metadata for tags built via CI](https://gitlab.com/gitlab-org/gitlab/-/issues/197996)\n  - [Search by tag name](https://gitlab.com/gitlab-org/gitlab/-/issues/255614)\n  \n- [Resolve: Group/project path updates break the Container Registry](https://gitlab.com/gitlab-org/gitlab/-/issues/18383)\n\n## The plan \n\nWe're planning a [phased migration](https://gitlab.com/gitlab-org/container-registry/-/issues/374#phase-2-migrate-existing-repositories), starting with GitLab.org repositories. After that, we'll move on to the Free tier, then on to Premium and Ultimate. We'll roll this out incrementally to maintain safety for customers and provide our team with an opportunity to identify and address any concerns.\n\n## Timing \n\nMigration begins: April 18th, 2022\nMigration ends: July 8th, 2022.\n\nTentative dates by tier:\n\n- GitLab internal projects: April 14 - April 18\n- Free: April 18 - May 18\n- Premium: May 18 to June 18\n- Ultimate: June 18 to July 8\n\nFor more information about the planned, percentage-based rollout, please refer to this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6427).\n\n## What to expect\n\n- For each repository, the migration will only target _tagged_ images. Untagged and unreferenced manifests, and the layers they reference, will be left behind and become inaccessible. 
Untagged images were never visible through the GitLab UI or API, but they were left behind in the backend after becoming dangling.\n\n- Once migrated to the new registry, repositories will be subject to continuous online garbage collection, deleting any untagged and unreferenced manifests and layers that remain as such for longer than 24 hours.\n\n- To ensure data consistency, the migration of each repository requires the enforcement of a small read-only period at the very end. This period is expected to be less than ten seconds for the vast majority of repositories. During this period, an error message will be returned when trying to upload or delete data, prompting clients to try again. Most clients will automatically retry several times, which should eventually succeed as the read-only enforcement lifts. We also put a mechanism in place to automatically cancel and reschedule migrations that are taking longer than expected. Nevertheless, if you experience any issues, please comment in the [epic](https://gitlab.com/groups/gitlab-org/-/epics/5523).\n\n## FAQ \n\n- Do I need to do anything?\n  - No, the process is fully automated. But if you have any untagged images that you'd like to preserve, please be sure to tag them as soon as possible.\n\n- Is there anything I can do to help? \n  - Yes! Although no action is necessary, we recommend activating the Container Registry [cleanup policies](https://docs.gitlab.com/ee/user/packages/container_registry/#cleanup-policy) for any relevant projects.\n\n- Is the update required? \n  - Yes. With this change, we can deliver a more modern and scalable product. 
You don't want to miss out on those features!\n",[1440,9,231],{"slug":5071,"featured":6,"template":680},"next-generation-container-registry","content:en-us:blog:next-generation-container-registry.yml","Next Generation Container Registry","en-us/blog/next-generation-container-registry.yml","en-us/blog/next-generation-container-registry",{"_path":5077,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5078,"content":5084,"config":5089,"_id":5091,"_type":14,"title":5092,"_source":16,"_file":5093,"_stem":5094,"_extension":19},"/en-us/blog/not-everyone-has-a-home-office",{"title":5079,"description":5080,"ogTitle":5079,"ogDescription":5080,"noIndex":6,"ogImage":5081,"ogUrl":5082,"ogSiteName":667,"ogType":668,"canonicalUrls":5082,"schema":5083},"Coworking home offices, working on the go - GitLab on remote work","GitLab team members share how they make their unique workspaces work for them, and see how they could work for you too!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680818/Blog/Hero%20Images/homeofficecover2.jpg","https://about.gitlab.com/blog/not-everyone-has-a-home-office","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Coworking home offices, working on the go - GitLab on remote work\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-09-12\",\n      }",{"title":5079,"description":5080,"authors":5085,"heroImage":5081,"date":5086,"body":5087,"category":808,"tags":5088},[672],"2019-09-12","\n\n_At GitLab, our team doesn’t wake up at the same time and commute the same roads to sit in the same office. In fact, some of our team members don’t have an office at all! As a globally distributed company with an all-remote workforce, we have an exceptionally diverse set of team members spread across six continents. 
In this series, we explore how GitLab team members use the autonomy our company affords them to create remote workspaces that suit their lifestyle and cater to their hierarchy of needs: whether that involves creating a cozy and personalized home office space, or diving into the unknown by working while traveling._\n\n\nIn my first job out of college as a reporter, I worked in a tiny office, which was actually a converted closet in the far back corner of the suburban office building. It was so cold that I had blankets, sweaters, a hot water kettle, and a space heater that hummed even in the middle of the blistering summer. Fast-forward past working at tiny desks in an elementary school classroom, a shared desk scenario in a big San Francisco office building (my snow parka stayed with me at the office), a dubious apartment-turned-company-office setup to today: where I work from a home office in [Oakland, California](https://en.wikipedia.org/wiki/Oakland,_California) and control my own thermostat. Best of all, I get to work alongside my longtime colleague and frequent video call interrupter, [Milly](/company/team-pets/#154-milly).\n\nLike me, organic search manager [Shane Rice](/company/team/#shanerice) works primarily from a remote home office, but unlike me he likes to keep his office on the cool side, in terms of temperature and decor.\n\n“Living in [Florida](https://en.wikipedia.org/wiki/Pensacola,_Florida), I put a lot of thought into keeping my office comfortable. When we converted the building from a shed, we added insulation in the ceiling and walls to save energy and help keep the temperature cozy as I work,” says Shane. 
“I use a wall-mounted AC during the summer and a space heater during the winter.”\n\nAnyone who has been on a Zoom call with Shane will remember his eye-catching decor, and the most recent addition to his family and the GitLab pets cohort, [Hendrix](https://grabs.shanerice.com/lcFp8n).\n\n![shanerice](https://about.gitlab.com/images/blogimages/home-office/view_from_desk.jpg){: .shadow.small.center}\n\n“This space is all about the things I love. I’ve got a bulletin board with memories, mostly from my kids. I save their drawings and cards and pin them up there,” says Shane. “The rest of the walls have posters and toys I've collected over the years. I framed my posters to show up on my video calls, and they're a great conversation starter when I'm meeting someone for the first time.”\n\n![stickers](https://about.gitlab.com/images/blogimages/home-office/door.jpg){: .shadow.small.center}\n\"One of my favorite things to get when I travel for work are stickers, but I hate to use them on my laptop because I know I won’t use it forever,\" says Shane. \"Instead of saving my stickers, I decided to put them on my office door. Now I can take them with us if we ever move.\"\n{: .note.text-center}\n\nBut not everyone at GitLab works from a home office. In fact, we have many team members that worked at home for a while, but now use a shared workplace, like [Alessio Caiazza](/company/team/#nolith), senior backend Infrastructure engineer.\n\nAlessio, who is based in [Florence, Italy](https://en.wikipedia.org/wiki/Florence), worked from home during his wife’s pregnancy and for the first six months after his son was born. “I loved that period, staying home in a quiet place, with my standing desk and multi-monitor setup,” Alessio said. “Being able to take care of my wife first, and my son later on. 
I'll always be grateful to GitLab for this opportunity.”\n\nBut [working from home with children can be challenging](/blog/working-remotely-with-children-at-home/) and isn't the best option for everyone. After a while, Alessio realized he needed some time and space to transition from dad mode to engineer mode, and moved his setup to a coworking space. “I used to say that working is the thing that happens between changing diapers. Also having less time for social interaction forced me to search for other adults during working hours. So now I have the best of the two worlds, I'm a happy dad and a happy engineer.”\n\n![florence](https://about.gitlab.com/images/blogimages/home-office/alessio.JPG){: .shadow.small.center}\nWhile Alessio has a nice setup in his usual coworking space, on this day he was driven outside into the summer heat after the AC failed in his building, making it too hot to handle.\n{: .note.text-center}\n\n## On the road with GitLab\n\nWhile many GitLab team members work from a consistent office setup, we have a subset of team members that have surrendered a cozy home office and sleek coworking remote space to work from the open road.\n\n[Nicole Schwartz](/company/team/#nicoleschwartz), product manager at GitLab, is embarking on a zig-zagging road trip across the United States, visiting GitLab team members and speaking at conferences. You might expect that life on the road means unpredictable working conditions, but Nicole has discovered that in most cases there is a coworking space or cafe near where she’s located for the day.\n\n![hotel](https://about.gitlab.com/images/blogimages/home-office/nicole_hotel.jpg){: .shadow.small.center}\nMost hotel rooms have WiFi, so Nicole typically starts her mornings in the hotel before moving on to a local cafe.\n{: .note.text-center}\n\n“I try to go for a local cafe if Yelp says they have WiFi, but in a pinch I’ll go to Panera, Starbucks, McDonalds,” Nicole says. 
“Once I had to drive over an hour to find a Starbucks! There have been occasions I have had to tether from my phone (GoogleFi) or call in on my phone; neither option is ideal.”\n\nWhen you’re highly mobile, the scenery changes quickly and the working conditions aren't always glamorous. While writing in about her experience, Nicole was sitting on a pretty uncomfortable chair with a tiny desk at a local coffee shop in Pittsburg, Kansas. She wasn’t able to bring her [laptop stand](https://www.therooststand.com/) with her because there wasn’t room in her backpack, and there were some teenagers chatting and a baby crying in the background: “Eh, it happens,” she writes.\n\n### The key to working on the road: flexibility and resourcefulness\n\n[Kerri Miller](/company/team/#kerrizor), [Create](/stages-devops-lifecycle/create/) backend engineer, spends about 40% of her time away from her homebase in Seattle, Washington, adventuring across North America by motorcycle. Kerri's work schedule varies depending upon the conditions. Generally, Kerri tends to wake early and work for a few hours where she's lodging before heading out on her motorcycle, wrapping up any remaining tasks in the evening. If she's someplace hot, she'll wake early to travel and then work in coffee shops or public libraries in the afternoon.\n\nOne of Kerri's favorite workplace setups was in a small town where the only publicly available WiFi was at the local bakery/coffee shop.\n\n\"Recognizing this fact, they offered a 'WiFi only' option on their menu, where for $5 you’d get unlimited internet access for the day, and access to a small RV to the side where they had set up several desks and tables and comfy chairs for the community,\" Kerri says. 
\"Large windows overlooked a prairie filled with sheep.\"\n\n![morning sheep](https://about.gitlab.com/images/blogimages/home-office/morning_sheep.jpg){: .shadow.small.center}\n\"You can't get this view from WeWork!\" says Kerri.\n{: .note.text-center}\n\nBoth Kerri and Nicole note that the trick to having a successful cross-country journey is a broad and distributed network of friends and colleagues. Kerri generally shares her travel plans in advance on the relevant GitLab Slack channels and on Twitter to see who she might visit on the [next leg of her journey](http://motozor.com/). Similarly, Nicole has been using the [GitLab Visiting Grant](/handbook/incentives/#visiting-grant) and setting up coworking days with our colleagues across the United States.\n\nCurrently dispatching from the scenic backdrop of [Copper Harbor, Michigan](https://en.wikipedia.org/wiki/Copper_Harbor,_Michigan), serverless engineering manager [Nicholas Klick](/company/team/#nicholasklick) has been working from his backpack for the past seven years.\n\n![nicholasklick](https://about.gitlab.com/images/blogimages/home-office/nicholasklick.JPG){: .shadow.small.center}\nNicholas is always in search of good WiFi, bringing a Verizon MiFi as a backup.\n{: .note.text-center}\n\nThough he did grab a desk at a local coworking space where he spends two to three days a week, his spirit is free to roam while his career continues to grow working all-remote at GitLab.\n\n_In the second part of our series, we'll explore how some GitLab team members are going out of their comfort zone and integrating global travel into their workflows and augmenting their workplaces (and expectations) accordingly._\n\n[Cover photo](https://unsplash.com/photos/GaBDdA63GcQ) by [Roberto Nickson](https://unsplash.com/@rpnickson?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/home-office?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: 
.note}\n\n",[9,832],{"slug":5090,"featured":6,"template":680},"not-everyone-has-a-home-office","content:en-us:blog:not-everyone-has-a-home-office.yml","Not Everyone Has A Home Office","en-us/blog/not-everyone-has-a-home-office.yml","en-us/blog/not-everyone-has-a-home-office",{"_path":5096,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5097,"content":5102,"config":5106,"_id":5108,"_type":14,"title":5109,"_source":16,"_file":5110,"_stem":5111,"_extension":19},"/en-us/blog/observability",{"title":5098,"description":5099,"ogTitle":5098,"ogDescription":5099,"noIndex":6,"ogImage":665,"ogUrl":5100,"ogSiteName":667,"ogType":668,"canonicalUrls":5100,"schema":5101},"We're moving our observability suite to Core","Our gift to you for 2020: Metrics, logging, and tracing and alerting are coming soon to Core!","https://about.gitlab.com/blog/observability","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're moving our observability suite to Core\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-12-16\",\n      }",{"title":5098,"description":5099,"authors":5103,"heroImage":665,"date":2215,"body":5104,"category":1517,"tags":5105},[672],"\nHappy New Year to our developer community! We're moving a big portion of our [observability features](/blog/monitoring-team-update/) – custom metrics, logging, tracing and alerting – from our proprietary codebase to our open source codebase in 2020. We aim to complete this migration by early next year, and [you can follow along with our progress in this Epic](https://gitlab.com/groups/gitlab-org/-/epics/2310). 
While we're giving you the gift of 20/20 vision into your production environment as a thank you for all you've contributed, there are also three practical reasons as to why we're moving our observability suite to Core.\n\n## Why we're moving observability to Core\n\n### It's part of our stewardship model\n\nThe first reason being that it is our [stewardship](/company/stewardship/) mandate. Our product is open-core and our pricing model is transparent and buyer-based. A [buyer-based pricing model](/company/pricing/#the-likely-type-of-buyer-determines-what-features-go-in-what-tier) means we try to think about what type of buyer is going to get the most value out of a feature as we determine whether a feature belongs in our open source Core product or our paid versions of GitLab.\n\n\"If it's a feature for a single developer who might be working on his or her own individual project, we want that to be in Core because it invites more usage of those tools and we get great feedback in the form of new feature requests and developer contributions,\" says [Kenny Johnston](https://about.gitlab.com/company/team/#kencjohnston), director of product, [Ops](/direction/ops/) at GitLab. \"It's an important part of our product philosophy to ensure we keep developer focused features in our Core product.\"\n\n### Observability belongs in Core\n\nOur mission is to provide an end-to-end DevOps solution for developers that is also open source, and we were falling a bit short on the Ops side of things by keeping essential observability tools in a proprietary codebase.\n\n\"Before this move, If you were using Gitlab's open source version, you could attach a Kubernetes cluster and deploy applications to it, but then your ability to observe how your users are interacting with it in production was limited,\" says Kenny. \"Now, you can get out-of-the-box metrics, create customized ones, get access to log tailing and searching and see traces – all within GitLab. 
Those were all non-existent in Core previously.\"\n\nThe fact is, the three pillars of observability: [custom metrics](/direction/monitor/platform-insights/), [logging](/direction/monitor/#logging), and [tracing and alerting](/direction/monitor/platform-insights/), are fundamental to the complete DevOps lifecycle even for those single developers working on their own projects. That means they belong in our Core product.\n\n### We want your input on monitoring\n\nThe third reason is that we value your contributions, and we're hoping that by making our observability tools open source you will make valuable improvements to the code so that other developers can benefit from your insight. This is the gift you offer us every day, and so now we have a wishlist for you.\n\n## The three pillars of observability are on our wish list\n\n### Custom metrics\n\nGitLab has a strong integration with Prometheus that allows users like you to [monitor key metrics for applications](/direction/monitor/platform-insights/) deployed on Kubernetes or a different [Prometheus server](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#manual-configuration-of-prometheus), without ever leaving our interface. Common reporting metrics include system metrics such as memory consumption, as well as error and latency rates. GitLab will automatically detect certain metrics from our metrics library, and you can customize these metrics based on your needs.\n\nBut there is always room for improvement. If you see something that you think needs improvement with metrics, or any of our observability features, please submit an issue or a merge request, or even contribute changes to our open source codebase.\n\n### Logging\n\nYou can see [logs of running pods on your Kubernetes clusters](https://docs.gitlab.com/ee/user/clusters/agent/index.html), without the hassle of having to toggle between applications, since logging is integrated within GitLab. 
But our [current logging capabilities](/direction/monitor/platform-insights/) are best described as log tailing. Users can see what is essentially a live stream of their logs within GitLab. Is our log tailing providing enough observability into the health of your deployed Kubernetes clusters? We're hoping you can help us innovate new ways to make our logging tools more valuable.\n\n\"I would love to have more insight into how users want to interact with [logging], if log tailing is sufficient, how much they want to move back and forth,\" says Kenny. \"Some of those contributions can come in the form of commentary or issues being created, but people could also take it upon themselves to adjust that view so that is better suited to their needs when tailing a log.\"\n\n### Tracing and alerting\n\nWhile there are certain metrics that are commonly reported about a deployed application — such as how much CPU is being consumed, the speed to process a request, etc., [tracing](https://docs.gitlab.com/ee/operations/tracing.html) allows you to monitor deployed applications in more depth and be alerted to any issues with the performance or health of the application. But, like logging, our [tracing and alerting capabilities are in the earliest stages](/direction/monitor/platform-insights/).\n\n\"Today, our tracing is fairly minimal,\" says Kenny. \"We have an embedded UI for Jaeger, but we'd love to see contribution from members of the Jaeger community for more deep integration into GitLab. Maybe developers and operators who use GitLab would like to see more of the Jaeger UI experience directly in GitLab.\"\n\nOur alerting capabilities are also a bit clunky. You have to define it directly in the UI and code configuration. 
By better uniting our tracing integration with Jaeger with our alerting capabilities, we could create a more synchronized user experience.\n\n## Closing the DevOps loop\n\nIn order for GitLab to function as an end-to-end DevOps solution, our users must be able to apply our ticketing system all the way from issue to production.\n\n\"I'm really interested in the use case where people are creating issues for alerting when something goes wrong with their production environments, and then how they interact with observability information in the incident management issue itself,\" explains Kenny.\n\nPerhaps you need an issue template for incidents that will show a particular log line. Or there might be a custom metric that is so commonly used, it ought to be added to our metrics library.\n\n\"If you don't like the way that your alerting is set up, or you don't like the way that your log system is aggregated we'd love your contributions. If you don't like how metric charts, logs or traces are displayed in fire-fighting issues we'd love your contributions. GitLab is open source. You can contribute improvements to your observability tool just like you can the rest of your developer platform,\" says Kenny.\n\nSo go for it!\n\nThe three pillars of observability on GitLab are ripe for iteration, and there is still so much creative potential for each of these tools. 
We look forward to seeing what you come up with in 2020!\n",[745,9],{"slug":5107,"featured":6,"template":680},"observability","content:en-us:blog:observability.yml","Observability","en-us/blog/observability.yml","en-us/blog/observability",{"_path":5113,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5114,"content":5120,"config":5125,"_id":5127,"_type":14,"title":5128,"_source":16,"_file":5129,"_stem":5130,"_extension":19},"/en-us/blog/observations-on-how-to-iterate-faster",{"title":5115,"description":5116,"ogTitle":5115,"ogDescription":5116,"noIndex":6,"ogImage":5117,"ogUrl":5118,"ogSiteName":667,"ogType":668,"canonicalUrls":5118,"schema":5119},"Why iteration helps increase the merge request rate","How the Monitor:Health team has been able to increase the merge request rate using better iteration, a bias for action, and by writing things down.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666603/Blog/Hero%20Images/book.jpg","https://about.gitlab.com/blog/observations-on-how-to-iterate-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why iteration helps increase the merge request rate\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2020-05-06\",\n      }",{"title":5115,"description":5116,"authors":5121,"heroImage":5117,"date":5122,"body":5123,"category":743,"tags":5124},[3556],"2020-05-06","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-05-21.\n{: .alert .alert-info .note}\n\nDo you know much about fighter jets? It's okay if you don't, neither did I until I became a software developer. 
While it seems like a rather strange set of things to see a correlation with, they are intrinsically related through a man named [John Boyd](https://en.wikipedia.org/wiki/John_Boyd_(military_strategist)) who was a military strategist and a fighter pilot.\n\nBoyd was rather famous in the Air Force for a law he coined, which we're going to use to demonstrate the difference between iterative and recursive approaches to software development, why we favor it in the [Monitor:Health team](/handbook/engineering/development/ops/monitor/respond/) and why you might want to favor it too.\n\n_Boyd's Law of Iteration states that **speed** of iteration beats quality of iteration_\n\nThis law was developed by Boyd while observing dogfights between MiG-15s and F-86s. Even though the MiG-15 was considered a superior aircraft by aircraft designers, the F-86 was favored by pilots. The reason it was favored was simple: in one-on-one dogfights with MiG-15s, the F-86 won nine times out of ten.\n\nWhat's happening here? If the MiG is the better aircraft, why would the F-86 win the majority of the fights? Well according to Boyd who was one of the best dog-fighters in history suggested:\n\n> That the primary determinant to winning dogfights was observing, orienting, planning, and acting **faster** not better.\n\nThis leads to Boyd's Law of Iteration: Speed of iteration beats quality of iteration. What's pretty incredible is that you will find this same scheme throughout every section of modern software development:\n\n- Writing unit tests? Keep them small and lean so they can be run faster.\n- Writing usability tests? They work best when they're lean and you can quickly discard what's not working.\n- Writing a function, class, or feature? Start with the smallest, [most boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) and iterate.\n- Doing an Agile approach? 
The quicker the better you'll often find.\n- Software in general is about failing early and often.\n\nSo let's pretend I've convinced you with some obscure fighter jet references and now you're ready to break down those merge requests and iterate quicker than you've ever iterated. Awesome! Let's talk about how to foster a team environment that allows for iteration, because that's the key here at GitLab. When you get started on this pilgrimage to [11 amazing merge requests per month as a goal](/handbook/engineering/development/performance-indicators/#mr-rate) you need to keep one very important thing in mind:\n\nIt's a team effort. While you as an individual developer will do an amazing job by hammering in on this skill, the real difference is made when you look at iteration as a tool to lift the team up. Think of yourself as the pilot that wants to get that faster iteration in to cover your buddies.\n\n## Bias for action\n\nWhen I got started at GitLab I was introduced to the idea of really believing in iteration as a methodology because it's a [company value](https://handbook.gitlab.com/handbook/values/#iteration).\n\n> Decisions should be thoughtful, but delivering fast results requires the fearless acceptance of occasionally making mistakes.\n\nThis was highlighted in various ways by different people across the company, but something that really stuck out to me was hearing another team member refer to the Monitor:Health team as a \"team with a strong bias for action\". We don't really believe in being reactive, instead we want to always be proactively improving the product. This underlying belief system trickles down from our team leader into every discussion, decision, deliverable set, and ultimately, how we as developers see our own agency operating. We **believe** in action, that an open merge request (even if it's not perfect) is always better than nothing.\n\nAs we mentioned, we have a bias for action. 
So, when our team anticipates a problem, we create a merge request first before starting a discussion. I know for a lot of people this might seem a bit counterproductive – what if this is a wasted effort? When in reality, [starting at a merge request](/handbook/communication/#start-with-a-merge-request) is the best possible place for any real discussion. It helps create a living log for the conversation, and creates more visibility for the problem we are fixing.\n\n## All code is bad code: Impostor syndrome, course correction, and accepting failure\n\nI had a mentor at my old company who was a fantastic programmer, and many of the people on my team looked up to him. One Friday afternoon, he gave a presentation that really shaped my understanding of iteration. This talk,  \"All code is bad code\" became rather famous in our small team because he mostly spoke about why the majority of the code he had written himself was ultimately bad code, and how the desire to **appear** smart is the number one barrier for people to become great software developers.\n\n> What you make with your code is how you express yourself, not the code itself - Eric Elliott\n\nProgramming is by its very nature difficult. As humans we're not particularly well-suited for deep and abstract logical thinking – our brains simply don't work like that by default and it's a learned skill for the most part. Being reminded of this is a humbling but freeing experience as it helps you move forward without fear. Every merge request you submit should be high quality but your definition of high quality should shift to mean delivering something useful to an end user.\n\nAt GitLab, we accept our limitations in that we might not know everything about the problem we're trying to solve. 
Instead, we lean heavily into the idea of the smallest, most [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions) that can be expanded upon quickly by collaborating with our team.\n\n> Our bias for action also allows us to course correct quickly.\n\nWe always accept there will be [uncertainty](https://handbook.gitlab.com/handbook/values/#accepting-uncertainty) in what we do as software developers but we don't let that stop us from trying to deliver an amazing product to our users.\n\nWhen we create a merge request, we do so with a [low sense of shame](https://handbook.gitlab.com/handbook/values/#low-level-of-shame) and [no ego](https://handbook.gitlab.com/handbook/values/#no-ego). This approach allows us to deliver fearlessly **even if we're wrong**.\n\nAs a team, this is the environment you want to foster because it helps create a wonderfully positive feedback loop: Low sense of shame > many merge requests submitted > more discussion > many iterations > ideally, the best possible collaborative results for the end user.\n\nThe core takeaway for team leaders is that **it's okay to make mistakes**. The best thing you can do as a team leader is to foster a safe place for developers to make mistakes and learn as they go.\n\nIf you're a developer, remember that **it's okay to make mistakes as long as you strive for course correction**.\n\n## Foster a healthy sense for urgency for writing things down\n\n> \"While you're thinking about doing it... just do it.\"\n\nIt's one of the things we do so well at GitLab in general it's writing things down. 
Documenting as we go is how we help our team pick up and go without needing to waste time on unnecessary communication.\n\nIt's safe to say that with our GitLab handbook being at [2,500,000 words](/handbook/about/#count-handbook-pages) and counting, the folks here take writing things down pretty seriously.\n\nAt GitLab, we believe this is also the path to a higher merge request rate.\n\nOn the Monitor:Health team and throughout GitLab, we believe in preserving our energy, capturing valuable conversations, and making them public to dispense this knowledge widely. As a new team member, I've seen this in action multiple times now. Over the course of my eight weeks at GitLab, I can count on one hand the number of times I've had to ping a team member with a question I could not find an answer to in our documentation. The discipline for keeping these notes really keeps the focus on delivering results since we don't have an excess of energy spent going back and forth with questions.\n\nIn my first four weeks at GitLab almost every single question I needed an answer to was already covered in the documentation someone else had already gone to the trouble of creating. 
Here is a list of some of my initial questions and links to the answers in GitLab documentation.\n\n- [How do I set up the local GitLab Development Kit?](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/howto/auto_devops/tips_and_troubleshooting.md)\n- [How do I set up the GitLab Development Kit with Prometheus?](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/howto/prometheus.md)\n- [How do I use embedded charts via Prometheus and Grafana?](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#embedding-gitlab-managed-kubernetes-metrics)\n- [How do I use the `@gitlab/ui` components?](https://gitlab.com/gitlab-org/gitlab-ui/-/blob/master/README.md)\n- [How do I handle styling in external projects?](https://gitlab.com/gitlab-org/gitlab-ui/-/blob/master/doc/css.md)\n- [How should components look and act on pages I am developing?](https://design.gitlab.com/)\n\nIf you can encourage your team to document solutions as problems arise, it can help developers deliver more.\n\n> Documentation is a love letter that you write to your future self. - Damian Conway\n\n## Tighten those feedback loops\n\n> Keep what works, disregard what doesn't.\n\nYou'll often notice that the feedback loop for tight-knit teams just gets tighter over time. People start to see patterns of what does and doesn't work as they work together over time. A good team should aim to address these patterns by keeping the ones that work and refining them but also by not being afraid to disregard the ones that don't work.\n\nRecently, the Monitor:Health team [delivered the first iteration of an incident management tool called the Status Page](https://about.gitlab.com/blog/how-we-built-status-page-mvc/). 
The team did an amazing job on the  [Status Page](https://gitlab.com/gitlab-org/status-page), with each team member really aiming to break problems into their smallest pieces and iterate quickly, which kept the overall merge request rate high for this project.\n\nThe post mortem of the development process is what made the biggest difference. We came together as a team to discuss what aspects worked well and which aspects didn't with the end goal being to tighten our feedback loops so people can really work autonomously and asynchronously. It takes a lot of bravery to have a critical discussion about what didn't work publicly, and not just focus on all the things you have done well.\n\nHow does this play out? Well for us on the Monitor:Health team, it means getting better at refining issues to ensure that when they receive a `ready for development` label they are **truly** ready for anyone to pick up at any time and take it all the way to done. This really helps increase the overall merge request rate because developers don't need to sit through one to three feedback loops waiting for their questions to be answered, when they could be getting it done.\n\nFor an issue to have a [`ready for development` label](/handbook/product-development-flow/#build-phase-2-develop--test) it needs to have:\n\n- A clear definition of \"done\"\n- All the necessary conversations are already resolved inside the issue\n- Developer defines a clear set of expectations\n- Say whether tests are required\n- Say whether UX is needed\n\nWe are trying to enable **any** developer on the Monitor:Health team to read an issue with zero preexisting context and deliver a merge request related to the issue without needing to leave that issue. Remember, we're trying to [measure results not hours](https://handbook.gitlab.com/handbook/values/#measure-results-not-hours). The less time someone spends asking questions, the more time they can spend delivering results.\n\n> Hail to the issue, baby! 
- Duke Nukem if he was a software developer at GitLab\n\n## It's all about the team\n\nThe only reason we are able to create this level of velocity inside GitLab is because of the belief that we can and **should** iterate quickly. By having the support of the team across the main points in how to iterate, i.e., bias for action, low sense of shame, a healthy sense of urgency, and tight feedback loops is the bedrock that allows us to deliver results for customers via a better product.\n\nWell, that's all folks! I hope you enjoyed the read and learned something along the way. If you have any questions or want to suggest an improvement, drop me an email at: `doregan@gitlab.com`.\n\nWhen in doubt, iterate faster.\n\n## TL;DR, show me the proof\n\n![Results](https://about.gitlab.com/images/blogimages/iterate-faster/results.png){: .center}\n\nThe Monitor:Health frontend team has grown over time while increasing average merge request rate. The team's merge request rate reflects the current team size of four people.\n\n## Learn more\n\n- [GitLab Values](https://handbook.gitlab.com/handbook/values/)\n- [Boyds Law](https://blog.codinghorror.com/boyds-law-of-iteration/)\n- [All code is bad](https://www.stilldrinking.org/programming-sucks)\n- [Accepting failure](https://www.youtube.com/watch?v=UxvXgmZf6NU)\n\n[We're hiring](/jobs/) at GitLab, or consider [trying us out](/free-trial/) for free.\n\nCover image by [Aaron Burden](https://unsplash.com/photos/G6G93jtU1vE) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,1440],{"slug":5126,"featured":6,"template":680},"observations-on-how-to-iterate-faster","content:en-us:blog:observations-on-how-to-iterate-faster.yml","Observations On How To Iterate 
Faster","en-us/blog/observations-on-how-to-iterate-faster.yml","en-us/blog/observations-on-how-to-iterate-faster",{"_path":5132,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5133,"content":5138,"config":5144,"_id":5146,"_type":14,"title":5147,"_source":16,"_file":5148,"_stem":5149,"_extension":19},"/en-us/blog/old-runners-stop-working",{"title":5134,"description":5135,"ogTitle":5134,"ogDescription":5135,"noIndex":6,"ogImage":2010,"ogUrl":5136,"ogSiteName":667,"ogType":668,"canonicalUrls":5136,"schema":5137},"Breaking change: Support for Runners prior to 9.0 will be removed imminently","With the removal of deprecated CI API v1, runners older than 9.0 will stop working with GitLab 10.0","https://about.gitlab.com/blog/old-runners-stop-working","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Breaking change: Support for Runners prior to 9.0 will be removed imminently\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabio Busatto\"}],\n        \"datePublished\": \"2017-09-04\",\n      }",{"title":5134,"description":5135,"authors":5139,"heroImage":2010,"date":5141,"body":5142,"category":675,"tags":5143},[5140],"Fabio Busatto","2017-09-04","\n\nThis month, when we release GitLab 10.0, **deprecated runners will not be able to communicate with the system anymore**, since they rely on an old version of the API that will be removed.\nAll runners with version 9.0 or newer will continue to work as usual without any modification.\nWe encourage all of our users who still have old runners deployed to **upgrade them to the latest version as soon as possible** to avoid any downtime.\n\n\u003C!-- more -->\n\nIn the GitLab 9.0 release post, we announced that previous runners have been [officially deprecated](/releases/2017/03/22/gitlab-9-0-released/#gitlab-runner-deprecation), and the support for them would have been eventually dropped in a future release.\nWith another specific [blog 
post](/releases/2017/04/10/upcoming-runner-changes-for-gitlab-dot-com/) back in April, we also announced that we migrated our shared runners on GitLab.com, and which are the great benefits in upgrading to the latest version, and we started a process to dismiss support for any version prior to 9.0.\n\n## When will this happen?\n\nGitLab 10.0 will be released on September, 22nd. Please consider that old runners connected to GitLab.com will stop working as soon as the first RC gets deployed to production, and this will happen around September 8th.\n**Be sure that you upgrade all your runners before that date**.\n\n## Which versions are affected?\n\nAll runners with a version older than 9.0 will stop working with GitLab 10.0, as they rely on old API that will be removed in this release. This means that you can continue using your old runners with any GitLab version up to 9.5, even if it is not suggested. Upgrading GitLab to 10.0 or above will require upgrading the runners as well.\n\n## How can I check if I have old runners still active?\n\nIf you are an Admin of a GitLab instance, you can find the list of shared runners under **Admin area ➔ Overview ➔ Runners**. Check the **Version** column to find if you have runners older than 9.0.\n\nIf you are Owner or Master for a project, go to **Settings ➔ CI/CD** (or **Settings ➔ Pipelines** if you are using the old navigation) and click on each of the runners you may find under **Specific Runners** to see the version.\n\n## How can I upgrade an old runner?\n\nRunners can be upgraded to the latest version following [these instructions](https://docs.gitlab.com/runner/#install-gitlab-runner). 
After the update, the runner should start working again as before, even better!\n",[1212,675,9],{"slug":5145,"featured":6,"template":680},"old-runners-stop-working","content:en-us:blog:old-runners-stop-working.yml","Old Runners Stop Working","en-us/blog/old-runners-stop-working.yml","en-us/blog/old-runners-stop-working",{"_path":5151,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5152,"content":5158,"config":5163,"_id":5165,"_type":14,"title":5166,"_source":16,"_file":5167,"_stem":5168,"_extension":19},"/en-us/blog/one-third-of-what-we-learned-about-ipos-in-taking-gitlab-public",{"title":5153,"description":5154,"ogTitle":5153,"ogDescription":5154,"noIndex":6,"ogImage":5155,"ogUrl":5156,"ogSiteName":667,"ogType":668,"canonicalUrls":5156,"schema":5157},"Everything we learned about IPOs in taking GitLab public - Part 4","GitLab co-founder and CEO Sid Sijbrandij shares insights about the process of going public.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671861/Blog/Hero%20Images/gitlab-logo-500.jpg","https://about.gitlab.com/blog/one-third-of-what-we-learned-about-ipos-in-taking-gitlab-public","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Everything we learned about IPOs in taking GitLab public - Part 4\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-10-14\",\n      }",{"title":5153,"description":5154,"authors":5159,"heroImage":5155,"date":5160,"body":5161,"category":675,"tags":5162},[762],"2022-10-14","\nIt was this time last year that GitLab (NASDAQ: GTLB) went public and was the first company to [publicly live stream](https://vimeo.com/650088717?embedded=true&source=vimeo_logo&owner=115027220) the entire end-to-end listing day at Nasdaq. 
To celebrate our 1 year anniversary, I shared an overview of what we learned through our S-1 filing and initial public offering (IPO) process with Sifted, a media outlet focused on topics for startups and innovators (and invested in by the venerable Financial Times), in a three-part series:\n\n1. [Going public in the US? This is the most important document in the process](https://sifted.eu/articles/gitlab-part-one-going-public-us/)\n2. [‘More cowbell!’: Publicly livestreaming GitLab’s Nasdaq listing day & celebrating](https://sifted.eu/articles/gitlabs-nasdaq-listing-part-two/)\n3. [Powered by cookies, not airplanes: Pricing and allocating IPO shares](https://sifted.eu/articles/gitlab-part-three-allocating-ipo-shares/)\n\nBut there is so much more to share around preparing the S-1 filing and initial steps for setting the IPO in motion, including how to work with insurance providers, what to expect from your board, and more - all of which I am including in this blog post.\n\nPart 4 of the series below will cover these areas.\n\n![GitLab team celebrating IPO](https://about.gitlab.com/images/blogimages/teamnasdaq.jpg){: .shadow} \nTeam members celebrating in NYC and remotely\n{: .note.text-center}\n\n## Preparing the S-1 filing\n\nTo get started, here are some things we learned throughout the GitLab IPO:\n\n- **Cheap stock**: We learned that it is common when the SEC reviews the IPO filing to comment on [“cheap stock.”](https://www.pwc.com/us/en/services/consulting/deals/library/cheap-stock.html) Cheap stock refers to equity awards issued to employees ahead of an IPO at a value far less than the IPO price. Cheap stock issues can delay an IPO or stock listing and may result in a cheap stock charge, which is an incremental and often unforeseen stock-based compensation expense. 
Cheap stock concerns can impact the company’s registration timeline, so it is important to ensure that it is clear to the SEC how your company has been assessing fair-market value for stock-based compensation issued prior to the potential IPO. We reviewed our assumptions we used for valuing the stock for granting and determined our assumptions of the timing of the IPO should have had a higher weighting and took a charge to the company but not to team members.\n\n- **Physical addresses not necessary**: Physical addresses aren’t necessary to file for an IPO. We have been a 100% remote workforce since inception and, as of July 31, 2021, had approximately 1,350 team members in over 65 countries. Operating remotely allows us access to a global talent pool, providing a strong competitive advantage. We wrote [Address Not Applicable in our S-1 filing](https://www.sec.gov/Archives/edgar/data/1653482/000162828021018818/gitlab-sx1.htm#:~:text=Employer%20Identification%20Number) where the address was requested. Initially we received a comment from the SEC regarding an address where investors could send communications to the company, but after providing an explanation about being 100% remote we were able to use the email address reach.gitlab@gitlab.com in the footnote on the cover page.\n\n- **Work remote-first with your S-1 drafting process**: Typically, drafting the S-1 is done in-person over many weeks. The process would involve going to the \"financial printer\" and sitting in a room together and flipping through hardcopy pages one by one. (In San Francisco, the most commonly used financial printer is situated near a sushi restaurant and it’s a custom to convene for sushi afterwards.) Even during the pandemic, some companies were still meeting in person in small groups. We drove a highly efficient process that minimized travel using Zoom, Slack, Workiva, and Google Workspace that spanned just three weeks for our initial S-1 draft. Even auditor reviews were handled remotely. 
This would typically require a combination of management, outside counsel, and the bankers passing drafts back and forth. Instead, we hosted real-time drafting sessions over Zoom and used shared Google Docs with multiple stakeholders doing real-time editing. We followed the [GitLab process](https://handbook.gitlab.com/) and the way the company works remotely for the S-1. Finally, because we didn’t hold meetings in person, we were able to pull in SMEs (subject matter experts) from throughout the legal and finance teams to answer questions during the diligence process with the bankers. At other companies, this process would have been handled by the Chief Legal Officer and the Chief Financial Officer. This leant itself to more diversity of thought than would typically be possible when constrained by the size of a meeting room. (The one obvious downside is that we didn’t get together afterwards for sushi.)\n\n- **Efficient process for responding to SEC comments**: When you file an S-1 confidentially, the SEC routinely [provides comments back](https://www.sec.gov/divisions/corpfin/cffilingreview). These comments are expected. The S-1 filing is intended to create market transparency by educating all investors. Comments from the SEC seek to ensure that a S-1 is in-depth enough to make investors feel informed. We were able to address the initial 16 comments (an unusually small number) from the SEC and refile quickly. We responded to the first set of comments in one week. This is quite fast to respond to an initial set of comments – 2 weeks is more typical.\n\n- **Founder letter**: These are common in S-1 documents. Most are one or two pages. My [founder letter](/blog/gitlab-inc-takes-the-devops-platform-public/#foundersletter) is longer at 4 pages (though Google’s 2004 letter is over twice as long based on word count). 
It included a [10-point plan to maintain our startup ethos](https://www.sec.gov/Archives/edgar/data/1653482/000162828021020056/gitlab-424b4.htm) (page 96) inspired by [Amazon’s Day 1 letter](https://s2.q4cdn.com/299287126/files/doc_financials/annual/Shareholderletter97.pdf) explained in a [blog post](https://aws.amazon.com/executive-insights/content/how-amazon-defines-and-operationalizes-a-day-1-culture/) and repeated verbatim in every annual filing since.\n\n- **File the S-1 confidentially**: Form S-1 is a filing required by the U.S. Securities and Exchange Commission for companies planning on going public. Public filings often lead to unsolicited public speculation about the company. Thanks to the [JOBS Act](https://www.sec.gov/spotlight/jobs-act.shtml), if your company meets certain requirements, you can confidentially submit the S-1 form. If your company decides not to go forward with an investor roadshow and IPO, the confidentiality preserves optionality. \n\n- **Know when to be quiet**: There is a [specific quiet period window](https://www.investor.gov/introduction-investing/investing-basics/glossary/quiet-period) leading up to the IPO  and continuing after the listing day when team members and people affiliated with your company (ex. board members) cannot be perceived as hyping the company. We were advised as a best practice to start our Quiet Period once we selected bankers for our IPO. The Quiet Period then continued through the 25 days after our stock started being publicly traded, which included the day of the IPO. It’s important to ensure compliance with laws and regulations governing the IPO and being a public company even before the company is public. The road to IPO is littered with horror stories and unintentional consequences as a result of [“gun jumping”](https://www.investopedia.com/terms/g/gunjumping.asp#:~:text=Gun%2Djumping%20flouts%20the%20rule,its%20IPO%20will%20be%20delayed.). 
This refers to selectively using financial information that has not been publicly announced. Delaying initial public offerings when companies are ready to go public can significantly disrupt innovation and the negative effects can last for years. One internet giant risked a delayed IPO when an interview granted to Playboy magazine months prior (disclosing key factors about their business) was later published during their quiet period. Another prominent San Francisco-based tech company had its IPO delayed when the CEO granted an interview for an article appearing in the New York Times that the SEC found to violate gun jumping rules. To minimize the risk of violating such laws and regulations, we followed best practices to limit statements to the IPO registration statement and vetted and approved press releases and started vetting our communications as though we were a public company months if not a full year or more before we actually went public. This is because during the IPO process the SEC may scrutinize every statement made by the company or individuals on the company’s behalf, even simple ones. The more communications, the greater the risk of saying something that shouldn’t be said.\nFor example, I couldn’t respond to people who sent their congratulations publicly on social media the day we listed. However, if you look at the [#EveryoneCanContribute hashtag](https://twitter.com/search?q=%23everyonecancontribute&src=typed_query), you’ll notice we did have a flurry of team member celebration tweets on October 14, 2021. 
To ensure compliance, celebration tweets were pre-written by our communications team and approved by our Legal team.\n\n![GitLab branding in NYC](https://about.gitlab.com/images/blogimages/nycnasdaq.jpg){: .shadow} \nGitLab branding outside the Nasdaq building in Times Square\n{: .note.text-center}\n\n## Setting the IPO in Motion \n\nOur banking partners who were experienced in IPOs commented that it was one of the most efficient S-1 drafting processes that they’ve seen. We were happy that this process, which typically takes six months, happened in four. To set up a right foundation for a successful IPO requires that the right processes and people (internally and externally) are in place:\n\n**Be transparent with Directors and Officers (D&O) insurance providers**. Directors and Officers insurance is expensive and the institutions which provide these services bid for your business after learning about your company through their own research as well as presentations and time spent with company representatives, usually from the Legal and Finance teams. We were unsure how our transparency would be perceived by the D&O insurers. However, our public [handbook](https://handbook.gitlab.com/) made it easier for D&O insurance providers to understand our business and processes. The GitLab Legal team created a bug bounty program that gave all team members a way to contribute to public company readiness by assisting in spotting and fixing “bugs” in our handbook. Bug bounty participants were rewarded with company swag. \n\n**Some board members might leave you**. Once a company IPOs, board members are subject to restrictions on their overall trading activities (e.g. tighter trading windows) with regard to the company’s stock. Due to these restrictions, earlier board members/investors may shift off the board, as new board members come on. 
This can add fresh perspectives on the board and help guide the company during the important post-IPO growth stage.\n\n**Analysts depend on the bank you pick**. Banks that help with IPOs will make [analysts available](https://www.investopedia.com/articles/financialcareers/11/sell-side-buy-side-analysts.asp) to cover your company. Therefore, we looked for banks that were associated with analysts whom we wanted to cover GitLab. This is significant as it supports increased brand and marketing awareness. Once that’s determined, you should consider analyst coverage when selecting additional banks to help with your IPO. \n\n**Lead-left bank**. The lead-left bank, also called the managing underwriter, is listed first among the other underwriters, in the upper left-hand corner of the cover page of the S-1 filing. In our case it is Goldman Sachs per our [S-1 cover page](https://www.sec.gov/Archives/edgar/data/1653482/000162828021018818/gitlab-sx1.htm#:~:text=Employer%20Identification%20Number). Getting left placement is a big deal because it means the bank receives the largest percentage of the deal allocation and generally leads the process from the banking side. Their industry reputation reflects on the company choosing them for this role. You will have several other banks involved to spread the risk of underwriting, reduce single bank exposure, and lower financial commitment to the IPO.\n\n**SAFE Framework**. We worked hard to educate team members early on to ensure they were empowered to make responsible decisions as a public company. Our SAFE framework is an acronym and mnemonic for how team members should think about transparency and what they can share publicly. (It stands for Sensitive, Accurate, Financial, and Effect.) 
GitLab team members have embraced the [SAFE Framework](https://handbook.gitlab.com/handbook/legal/safe-framework/) including creating a SAFE Slack channel staffed by our Legal team where team members can seek answers as well as flag things that are of concern. In terms of company communications, when we want to keep something internal, we say, “Keep this information SAFE.” We’ll also put this flag in decks, videos, Slack messages, and other communications. It is also a required part of our onboarding and training process. We’ve even created a SAFE Slack emoji:\n\n![:safe-tanuki:](https://about.gitlab.com/images/blogimages/safetanuki.png)\n\n**Reg FD training**. In addition to our SAFE framework, to prepare our team members we also took into account that we are a geographically diverse group, with more than a third of our company based outside of the U.S. We wanted to be mindful that not everyone would be familiar with U.S. Securities laws and may not understand some of the requirements GitLab would be subject to as a public company. This is why we created and had all team members go through Regulation Fair Disclosure (Reg FD) training as well as How to Avoid Insider Trading training. (We also have this training set up to recur annually.) We are not aware of another company that trains their entire company on Reg FD, as it is usually just provided to certain individuals who are authorized to speak on behalf of the company. \n\n**Timing an IPO**. The timing of an IPO requires a mixture of art and science. There are a number of conversations between the company’s retained investment bankers and buy side investors surrounding market conditions. An element of this involves the company’s investment bankers learning in which types of companies these investors may be interested. 
For example, if the growth rate of a potential new IPO is less than X, and/or the new IPO is unprofitable, then there may be no appetite for that particular IPO and naturally, a better outlook would likely inspire greater interest. Through continuous conversations, overall investor appetite is gleaned. Then it comes down to picking a specific day of the week and time of year, avoiding holidays. Companies must consider a time in which the most investors are available and paying attention. IPO days typically take place Tuesday through Thursday. And they don’t tend to be priced in the summer as investors are usually on vacation and not paying as much attention to the market. Labor Day through Thanksgiving is a popular time for IPOs. You also want to be mindful of the timing of your IPO relative to quarterly results as you want investors to consider your next fiscal year as the basis of valuing your company.\n\nWhen choosing a date for GitLab, we knew if we waited until after October 31, 2021, we would need to re-file, because of the filing date of our S-1 filing. We took all of these factors into consideration and chose October 14, 2021, as our IPO day. The date was serendipitous as GitLab’s [Friends & Family Day](https://handbook.gitlab.com/handbook/company/family-and-friends-day/) took place on Friday, October 15, 2021, and the company was also celebrating its 10 year anniversary in that time frame since the first commit to the GitLab open source project took place on October 8, 2011.\n\n**Bring down call.** Each time a company is about to file an amended Form S-1, investment bankers and attorneys gather on a “bring down” call. During this call, attorneys will ask a series of questions about material information, permissions, security, risks, concerns, etc., with the goal to achieve an “all clear.” With each new call, they’ll ask if the company has anything materially new to disclose. 
This was all done remotely.\n\n**Securing the Opening Bell.** Choosing the opening bell is generally preferred over the closing bell to provide a full day of celebration. We approached our listing day as a marketing event and a way to celebrate with team members and contributors globally, so securing the opening bell was important. This would allow us to reach the maximum amount of time zones. If you have a date in mind and stick with that date in the days leading up to the listing, you’ll be more likely to attain the opening vs. closing bell. \n\nWhile the timing at the moment for IPOs may not be in many companies’ favor, I know many amazing companies have been founded during times of economic uncertainty, such as Electronic Arts (1982) and Slack (2009). I’m looking forward to seeing the next generation of innovative ideas come to market and experience the same growth and excitement that we were able to capture and I hope that this educational series may help them when the time is right.\n\nThank you again, sincerely, to everyone who helped us along the road.\n",[9,277,675],{"slug":5164,"featured":6,"template":680},"one-third-of-what-we-learned-about-ipos-in-taking-gitlab-public","content:en-us:blog:one-third-of-what-we-learned-about-ipos-in-taking-gitlab-public.yml","One Third Of What We Learned About Ipos In Taking Gitlab Public","en-us/blog/one-third-of-what-we-learned-about-ipos-in-taking-gitlab-public.yml","en-us/blog/one-third-of-what-we-learned-about-ipos-in-taking-gitlab-public",{"_path":5170,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5171,"content":5176,"config":5182,"_id":5184,"_type":14,"title":5185,"_source":16,"_file":5186,"_stem":5187,"_extension":19},"/en-us/blog/open-source-analytics",{"title":5172,"description":5173,"ogTitle":5172,"ogDescription":5173,"noIndex":6,"ogImage":1103,"ogUrl":5174,"ogSiteName":667,"ogType":668,"canonicalUrls":5174,"schema":5175},"4 Examples of the power of open source analytics","Our Data and Analytics team 
manager reflects on how open source and radical transparency has benefited analytics work at GitLab.","https://about.gitlab.com/blog/open-source-analytics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Examples of the power of open source analytics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor Murphy\"}],\n        \"datePublished\": \"2019-04-15\",\n      }",{"title":5172,"description":5173,"authors":5177,"heroImage":1103,"date":5179,"body":5180,"category":1517,"tags":5181},[5178],"Taylor Murphy","2019-04-15","\nOne of the great parts of working for a company with such a strong [open source](/solutions/open-source/) ethos is that\nyou're able to apply this philosophy to other parts of the company. We on the Data Team\nhave worked hard to embody the [values of GitLab](https://handbook.gitlab.com/handbook/values/),\nparticularly collaboration and transparency.\n\nIt starts by defaulting to public for everything. 
Our [primary code repository](https://gitlab.com/gitlab-data/analytics/)\nis public and MIT licensed, meaning anybody can contribute or just take what they find useful.\nOur code, issues, and [documentation](/handbook/business-technology/data-team/) are public.\n\n## This radical transparency has had several positive side effects\n\n### The effect I'm most excited about is having people contribute to our codebase.\n\nWhen we were migrating to Snowflake for our data warehouse, we needed to convert our SQL code\nthat was specific to PostgreSQL to a Snowflake-compatible format.\nOne of the models in our codebase [generates a table](https://dbt.gitlabdata.com/#!/model/model.gitlab_snowflake.date_details) of dates and related metadata such as day of year, week of year, quarter, etc.\nAn external contributor, [Matthias Wirtz](https://gitlab.com/swiffer), who had been following our\nproject and the [Meltano](https://meltano.com/) project, took it upon himself to make the\nupdate and create a merge request in our project. We went back and forth a bit with code review\nand testing, but eventually [it was merged](https://gitlab.com/gitlab-data/analytics/merge_requests/476/diffs) and we now rely on this code today!\n\n### Another great benefit is that it makes conversations easier within the analytics community.\n\nA key part of our data stack is data build tool, or [dbt](https://www.getdbt.com/) for short.\nThis is a powerful open source project that makes version controlling and executing SQL code easy.\nThe company behind the project, [Fishtown Analytics](https://www.fishtownanalytics.com/),\nhosts a great community on [Slack](https://slack.getdbt.com/). I've been able to answer basic\nquestions about project structure, documentation, and testing just by linking to our codebase and\n[dbt-generated docs](https://dbt.gitlabdata.com)\ncountless times, and the feedback is always positive. 
We see people who are shocked that\nwe're so open but also appreciative that they can poke around a production codebase with ease.\n\n### An additional benefit that we've seen is that by putting everything out in the open we're helping to drive the industry forward.\n\nIt's one thing to say \"Here's what we're doing, but sorry you can't see the code\" versus\n\"Here's what we're doing, here's _how_ we're doing it, and what are your ideas to make it better?\"\nThe latter invites people into the conversation to build upon ideas and others' creations.\n\n### The last piece I want to highlight is the idea that the actual code that you use for analytics isn't your company's competitive advantage.\n\nYou could know exactly how we move, store, model, and analyze our data, and its utility for a\ncompetitor would primarily be to get their own analytics off the ground.\nThe real value is the data itself and the decisions people make from the results of your analyses.\nWe, of course, protect our data and our customers' data, but there's no reason why people\nshouldn't be able to see how we _use_ that data to make decisions. And, being a transparent company,\nwe're very open about the decisions we make as well.\n\nOverall, we're seeing the same transformation that software engineering underwent with the [DevOps\nmovement](/topics/devops/) happen in the analytics world, only with about a five-year lag.\nMore open source tools are being created for data teams every day, and more people are sharing\nhow they build their stacks and analyze their data. 
At GitLab, we're betting that our [core values](https://handbook.gitlab.com/handbook/values/)\ncan bring emergent positive benefits to every part of a company, including data teams!\nWe look forward to collaborating with you as this industry changes and grows!\n",[267,9,745],{"slug":5183,"featured":6,"template":680},"open-source-analytics","content:en-us:blog:open-source-analytics.yml","Open Source Analytics","en-us/blog/open-source-analytics.yml","en-us/blog/open-source-analytics",{"_path":5189,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5190,"content":5196,"config":5201,"_id":5203,"_type":14,"title":5204,"_source":16,"_file":5205,"_stem":5206,"_extension":19},"/en-us/blog/origin-of-devsecops-platform-category",{"title":5191,"description":5192,"ogTitle":5191,"ogDescription":5192,"noIndex":6,"ogImage":5193,"ogUrl":5194,"ogSiteName":667,"ogType":668,"canonicalUrls":5194,"schema":5195},"Disagree, commit, and disagree: How a lazy solution became a category","Find out the origin story of the DevSecOps category.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679881/Blog/Hero%20Images/flowercomingthroughsidewalkcrack.png","https://about.gitlab.com/blog/origin-of-devsecops-platform-category","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Disagree, commit, and disagree: How a lazy solution became a category\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2023-08-30\",\n      }",{"title":5191,"description":5192,"authors":5197,"heroImage":5193,"date":5198,"body":5199,"category":675,"tags":5200},[762],"2023-08-30","\nA few months ago, GitLab - and the DevOps Platform category - reached a big milestone. 
Two influential analyst firms, [Gartner](https://about.gitlab.com/blog/gitlab-leader-gartner-magic-quadrant-devops-platforms/) and [Forrester](https://about.gitlab.com/blog/gitlab-leader-forrester-wave-integrated-software-delivery-platforms/), issued reports that validate the market is moving from point solutions to a platform. They officially recognized DevOps platforms as a category. A category we created. \n\nThis is the story of how we did it.\n\nI am thrilled that we created a category. Very few companies are able to do so. Other examples are Dropbox for the file hosting service category; Hubspot for inbound marketing; and Slack for searchable logs of all communications and knowledge. The backstory is that we did not start with a vision for creating a category. GitLab didn’t even begin as a business. It started with a programmer’s need for a great open source collaboration tool. \n\nNow, nearly 12 years after GitLab’s [very first commit](https://gitlab.com/gitlab-org/gitlab-foss/-/commit/9ba1224867665844b117fa037e1465bb706b3685), I want to share what we learned on the journey to creating the DevOps Platform category.\n\n## Category design begins with solving your own problem\nDmitriy Zaporozhets needed a tool to collaborate with his team. His employer at the time wasn’t willing to buy the tool he wanted, so he decided to build it himself. He created GitLab in 2011 from his home in Ukraine.\n\nTogether with Valeriy Sizov, Dmitriy started to build GitLab as a developer collaboration tool based on Git. Developers from around the world quickly began using it. In the first year, 300 people contributed to improving it.\n\nGitLab was not founded with a grand plan or a 10-year vision to create a single platform for the entire software development lifecycle. 
The reality is that GitLab began with one person who had a need and built a solution to meet it.\n\n## Categories are discovered, not planned\nOne of the things I respect most about Dmitriy is that he built GitLab as open source, allowing others to use his ideas and build on them in their own ways. He was so committed to open source that he was supportive of me commercializing his work.\n\nI encountered GitLab for the first time in 2012. I recognized the value that it could provide for other software companies, but I also saw the challenges in installing and managing it. Not everyone had the means to do that. I saw the potential for GitLab to be commercialized as a SaaS business: cloud-based source code management (SCM) for everyone.\n\nI was nervous about commercializing Dmitriy’s work, so I reached out to tell him what I was working on. He was happy that what I was doing could help GitLab become more popular and attract even more community contributions, which it did.\n\nThis was our exchange: \n\n\n![Emails between Sid and Dmitriy](https://about.gitlab.com/images/blogimages/devsecopsoriginmessages.png){: .shadow}\n\n\nIn late 2012, similar to how Dmitriy made an SCM tool for his own need, he built his own continuous integration (CI) tool called GitLab CI, a tool that ran tests to check the code for conflicts.\n\nMeanwhile, large organizations began adopting GitLab, and Dmitriy tweeted that he wanted to work on GitLab full-time. I got in touch with him to work out an arrangement for him to join GitLab, the company. But when I went to the local Western Union branch to make a wire transfer, I had to convince the teller that I knew Dmitriy and was not falling victim to wire transfer fraud - a common issue at the time.\n\nWe then introduced [GitLab Enterprise Edition](https://about.gitlab.com/releases/2013/07/22/announcing-gitlab-enterprise-edition/) with features asked for by larger organizations. 
\n\nThen, in 2015, we noticed that a community contributor named Kamil Trzciński built a far better runner than we did (ours was in Ruby and single-threaded, his was in Go and multi-threaded). It was so much better that we decided to adopt his runner as the standard.\n\nThrough iteration, building on each other’s ideas, and being open to ideas from outside our company, we continued to build two great tools for SCM and CI. \n\nHowever, I admit that there were critical moments when our willingness to allow others to contribute would be tested. When Kamil joined GitLab full-time we could not have predicted that he would help us discover a new category. Not by contributing a better CI runner but by changing the way software is developed. \n\nKamil suggested a radical idea: to integrate GitLab SCM and GitLab CI into one tool.\n\n## Disagree, commit, and disagree\nDmitriy and I disagreed with Kamil. Dmitriy believed in the Unix philosophy where one program should do one thing well; if you want a program to do something else, start a new one. I thought that customers wanted separate tools for separate use cases. The market was filled with specialized point solutions.\n\nMany business leaders say, “Disagree and commit,” and we did. We disagreed, and committed to continuing to build two different products.\n\nBut Kamil persisted in making a strong case for [why SCM and CI should be integrated](https://about.gitlab.com/blog/gitlab-hero-devops-platform/). This is when our operating principle of [disagree, commit, and disagree](https://handbook.gitlab.com/handbook/values/#disagree-commit-and-disagree) was born. Every decision can be changed, and the best decisions should often be made despite management’s opinion.\n\nDmitriy and I relented and took Kamil’s suggestion over our opinion and the opinion of the market. \n\nIt was a lazy choice because combining SCM and CI would mean having only one Ruby on Rails app to maintain. 
We could avoid duplicating the interface and the data, making it more efficient to develop code. But it also ended up being a far better user experience, giving customers a much faster way to set up CI, and faster cycle times by not having to switch between apps. GitLab became a platform with one UI, one data store, one way to serve up information, and one way for a company to collaborate and be on the same page at the same time.\n\nBy taking the suggestion of someone new to the team and creating [the world’s first DevOps platform](https://about.gitlab.com/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/), we changed the course of our company and, eventually, the whole software development industry. I am proud to be a part of the DevSecOps Platform story because it is a story about allowing everyone to contribute, especially when someone else has the best idea. \n\nIt is important to disagree and commit but still disagree. That is how Dmitriy and I realized that there could be one platform for the entire software development lifecycle, and eight years later, Forrester, Gartner, and the market see it, too.\n\nToday, we have a [DevSecOps platform](https://about.gitlab.com/platform/?stage=plan). \n\nLooking to the future, we hope to create another category: [AllOps](https://about.gitlab.com/company/vision/), a single application, for all R&D that includes DevSecOps, ModelOps DataOps, and Service Desk. \n\nIn the future, we will expand support for [ModelOps and DataOps](https://about.gitlab.com/direction/modelops/) to give customers the ability to manage data and its associated AI/ML models in a similar fashion to their software projects. \n\nAnd, because customers need the ability to triage application incidents directly where their applications are built and deployed, we will continue to expand our [Service Desk](https://docs.gitlab.com/ee/user/project/service_desk/index.html) offering.\n\nIt is GitLab’s mission to ensure that everyone can contribute. 
Our vision for AllOps moves us further in that direction - to deliver a single application for all innovation.\n",[675,9,475,1298],{"slug":5202,"featured":6,"template":680},"origin-of-devsecops-platform-category","content:en-us:blog:origin-of-devsecops-platform-category.yml","Origin Of Devsecops Platform Category","en-us/blog/origin-of-devsecops-platform-category.yml","en-us/blog/origin-of-devsecops-platform-category",{"_path":5208,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5209,"content":5215,"config":5220,"_id":5222,"_type":14,"title":5223,"_source":16,"_file":5224,"_stem":5225,"_extension":19},"/en-us/blog/our-journey-to-a-diverse-and-inclusive-workplace",{"title":5210,"description":5211,"ogTitle":5210,"ogDescription":5211,"noIndex":6,"ogImage":5212,"ogUrl":5213,"ogSiteName":667,"ogType":668,"canonicalUrls":5213,"schema":5214},"Our journey to a more diverse and inclusive workplace","GitLab is taking action to create a more equitable and representative workplace for underrepresented groups.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679254/Blog/Hero%20Images/dib_mit.png","https://about.gitlab.com/blog/our-journey-to-a-diverse-and-inclusive-workplace","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Our journey to a more diverse and inclusive workplace\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-06-16\",\n      }",{"title":5210,"description":5211,"authors":5216,"heroImage":5212,"date":5217,"body":5218,"category":299,"tags":5219},[672],"2020-06-16","\n\nDespite the talent of Black leaders, there are glaring disparities in representation of Black professionals in positions of power in a number of industries, including in tech. 
About 5% of tech professionals are African American, and less than 2% of tech executives are African American, according to data from the [Harvard Business Review](https://hbr.org/cover-story/2019/11/toward-a-racially-just-workplace).\n\nGitLab is not immune to diversity disparities in leadership. At GitLab, we have [zero Black or Latinx director-and-above leaders](/company/team/structure/#layers). Our CEO, [Sid Sijbrandij](/company/team/#sytses), is committed to fixing this by hiring or promoting internally by 2021, along with a host of other diversity, inclusion, and belonging (DIB) actions that were already in development to make GitLab a more diverse and inclusive company, where everyone feels a sense of belonging.\n\n\"The past few weeks have reinforced for me the importance of being an ally and taking action to create more diversity within our company,\" says Sid. \"Our executive team believes we can do more to ensure that the diversity of our workforce better reflects the diversity we see in the world.\"\n\n\"A key metric we are focused on is the number of Black senior leaders at GitLab,\" he adds. \"Currently, we have zero Black team members at GitLab in the role of director and above. This is not a number we are proud of, so we are taking action to correct this by 2021 through hiring or promotion. We will continue to iterate on diversity goals in order to have and maintain a diverse and inclusive leadership team.\"\n\nThis blog post is the first in a multi-part look at DIB, a series that was started well before the spring 2020 deaths of George Floyd, Breonna Taylor, Ahmaud Arbery, and countless others sparked a global movement. Racism is a global issue, and the protests over these killings in the United States have been a catalyst for action globally, in and out of the workplace. We’ll make the business case for DIB another day; instead we’re going to jump right into what we’ve done and what we still need to do at GitLab. 
We hope that our transparency about our journey might make it easier for other companies on the same path. It will also hold us accountable.\n\nIn recognition of the abundance of talent in the Black community, and the lack of Black director-and-above leaders at GitLab, we have committed to fixing this diversity deficit by 2021 through hiring or promotion. Based on our [self-reported employee identity data](/company/culture/inclusion/identity-data/), we are also going to look at other groups that have low or no representation on our team, such as our Latinx and Native/Pacific Islander colleagues. Our plan is to be even more intentional and rigorous about recruiting and promoting to create a more diverse and inclusive leadership team, including Black, Latinx, and other underrepresented groups.\n\n## Start with values\n\nGitLab has been a company since 2014, and diversity, inclusion, and belonging have always been part of our core values. But it wasn’t a formal part of our business strategy until five years in, after we hired [Candace Byrdsong Williams](/company/team/#cwilliams3) as the diversity and inclusion partner in spring 2019. By hiring Candace, we felt we were making DIB a formal part of the company’s business strategy.\n\nResearch shows building a more inclusive workplace where everyone feels a sense of belonging is not just the right thing to do, it’s also good for business. The best way to reduce attrition of underrepresented groups and cultivate a truly inclusive workplace is to create opportunities for advancement.\n\nCandace’s role as the DIB partner is to help introduce diversity initiatives and help us create a more inclusive workplace, and there is no question that she’s made strides.\n\n\"We are now taking a deeper dive of iteration into key metrics with a closer focus on women in GitLab as a whole, women in management, and women in senior leadership,\" says Candace. 
\"We are paying close attention to the current trends, as well as attrition and trying to set realistic goals based on these metrics. Also as next steps, we are establishing our key metrics for race starting with Black team members using the same application with metrics.\"\n\nCandace is a team of one at GitLab, but she is making big changes to grow our DIB program. Here are just a few examples of past and upcoming projects Candace, and other internal partners, are working on:\n\n*   Established TMRGs (employee resource groups) with guidelines and continuing to stand up additional TMRGs\n*   Established the DIB Advisory Group at GitLab\n*   [Established DIB Trainings/Resources](/company/culture/inclusion/#diversity-inclusion--belonging-training-and-learning-opportunities)\n*   Conducted a DIB survey to get a pulse on how DIB is seen and felt at GitLab today\n*   Published the [Building an inclusive all-remote culture](/company/culture/inclusion/building-diversity-and-inclusion/) handbook page, which speaks to GitLab’s unique ability to create a more inclusive environment through all-remote work.\n*   [Established DIB timelines summary of events](/company/culture/inclusion/diversity-and-inclusion-events/)\n*   [Iterating on our mission statement](/company/culture/inclusion/#gitlabs-definition-of-diversity-inclusion--belonging): Established the mission statement with further iteration to now include \"Diversity, Inclusion, and Belonging.\" **Belonging** is acknowledgement of your voice being heard along with creating an environment where team members feel secure to be themselves.\n*   Established key metrics for underrepresented groups and continuing to iterate\n*   Conducts our monthly DIB company call for all to see what is going on in the DIB space at a high level\n*   Plans for a 2019 DIB report to capture our learnings from last year and continue to produce reports moving forward\n*   DIB Speaker Series: External speakers are invited to join GitLab team members 
in a discussion about DIB topics in Q2-Q3 2020\n*   A new issue board for TMRGs (e.g., [the Minorities in Tech Employee (MIT) TMRG issue board](https://gitlab.com/gitlab-com/diversity-and-inclusion/-/boards/1634805?label_name[]=TMRG)) to help track all the activities our TMRGs are doing\n*   Increase participation for existing and [new TMRGs](/company/culture/inclusion/erg-guide/)\n\n## It’s hard work\n\nThe fact is, building a more diverse and inclusive workplace takes time, effort, intentionality, and persistence. At GitLab, we’ve made [DIB one of our core values](https://handbook.gitlab.com/handbook/values/#diversity-inclusion), but that doesn’t mean our company is immune to the challenges.\n\nThrough research and conversation with leaders in the diversity, inclusion, and belonging space, we’ve summarized some common diversity and inclusion challenges and identified opportunities for growth in GitLab’s DIB strategy, but many of these decisions require resourcing and buy-in from leadership. Our team is reviewing these recommendations and will be considering these updates to our DIB strategy.\n\n**One of the most common challenges with diversity and inclusion is that the responsibilities are frequently allocated to a person of color who is responsible for diversity and inclusion along with their day job, according to [reporting in the HBR](https://hbr.org/cover-story/2019/11/toward-a-racially-just-workplace).**\n\n**The upside**: We’ve hired Candace, who is responsible for DIB at GitLab full-time.\n\n**The downside**: She is a team of one, meaning she has to rely on collaborative efforts with other teams and the volunteer efforts of TMRG leads.\n\nFor example, Candace planned the strategy, rollout, and process for standing up new TMRGs, then team members signed up and the TMRG leads were designated. Candace continues to work closely with TMRG leads for planning, oversight, and alignment with DIB and company goals. 
Another recent collaborative effort with other team members was adding executive sponsors from our E-group to TMRGs to assist in amplifying the voice and buy-in from leadership from the top down. Support from executives and visibility of leadership in DIB is imperative to its success.\n\n**We rely upon volunteers, often people from underrepresented groups, to work on diversity and inclusion initiatives along with their day jobs. This can lead to \"diversity fatigue,\" as they are the ones pushing the conversation forward or are seen as \"cultural ambassadors\" and are constantly fielding questions.**\n\n**The upside:** Our TMRG leaders are highly engaged, talented people.\n\n**The downside:** They are not paid to lead DIB efforts, but rather have a day job at GitLab that is unrelated to their work with the TMRGs.\n\n**The solution:** It is the responsibility of all team members at GitLab to drive a culture of belonging, which feeds into our recommendation to grow our ally base at GitLab. If we could grow our ally base to include more people that are not from underrepresented groups, we can help shift some of the burden from volunteers from underrepresented groups to include everyone at GitLab.\n\nIn lieu of more hires, more volunteers can increase our capacity to build more robust programs and will alleviate some of the program management burdens Candace and TMRG leaders are carrying in addition to their other jobs at GitLab.\n\n**Oftentimes, the diversity and inclusion team will be nestled within human resources, and not at the executive level, which can make it more challenging to advance diversity initiatives outside of HR activities.**\n\n**The upside**: Candace is the DIB lead and she reports to [Carol Teskey](/company/team/#cteskey), senior director of People Success, who is highly engaged in DIB initiatives. 
Our executive team recognizes both the business and intrinsic value of building robust DIB initiatives, and as a result we have not had the experience of DIB being sidelined; in fact DIB remains a top priority.\n\n**The downside**: We do not have anyone at the executive level that is solely responsible for DIB.\n\n**The solution**: GitLab is a growing company, and DIB is one of our core values. We are currently in the process of recruiting for a Chief People Officer, who will be responsible in part for DIB initiatives in partnership with Candace and Carol. By placing someone in a senior leadership or executive-level role that is committed to championing DIB initiatives we give DIB a seat at the executive table.\n\n**[Companies don’t often launch with a diversity and inclusion program already in place](https://hbr.org/2018/07/the-other-diversity-dividend), which means the diversity and inclusion lead has to work backwards on many initiatives. This phenomenon is often demonstrated by a homogeneous group of people (often, white men) in leadership roles.**\n\n**The upside:** We have the commitment from our CEO to work on recruiting and promoting more underrepresented groups to leadership roles. We also have buy-in from senior leadership to [build a mentorship program](/company/culture/inclusion/erg-minorities-in-tech/mentoring/) that will help people of color already working for GitLab to advance within the company. We’re also constantly iterating on our hiring process to ensure we are drawing from diverse talent pools (see below!).\n\n**The downside:** GitLab started in 2014, and our first hire devoted solely to DIB was in 2019, so we’re building and integrating programming within an existing infrastructure. 
There is a lack of diversity among people at the director level or higher, which we are working hard to remedy with new hires or promotions by 2021.\n\n**The solution**: The Minorities in Tech (MIT) TMRG is working with GitLab DIB to draw upon existing repositories for diverse talent by working with partners at historically Black colleges and universities (HBCUs), and organizations such as [AfroTech](https://afrotech.com/), etc. Candace has worked closely with our leadership and data team to review trends in hiring for our metrics and working on setting ambitious metrics around hiring underrepresented minorities for leadership roles.\n\nGitLab has also moved to a unique, [outbound hiring model](/handbook/hiring/candidate/faq/) that allows us to source a diverse pipeline of candidates for our [current and future roles](/jobs/). We’ve launched a number of [hiring initiatives](/company/culture/inclusion/talent-acquisition-initiatives/) to support this model and achieve our [objectives and key results](/company/okrs/fy21-q2/#3-ceo-great-team), including a training on diversity sourcing for our hiring teams and managers, as well as sourcing sessions that are dedicated to identifying candidates from underrepresented groups for some of our high-priority roles. Our approach also extends through the interview process. We’ve created [training](/company/culture/inclusion/#diversity-inclusion--belonging-training-and-learning-opportunities) on how to interview inclusively, we use inclusive language in our recruiting outreach, and we offer candidates the opportunity to connect with our employee resource group (TMRG) members during the hiring process.\n\n## The time is now\n\nThe journey to a more diverse and equitable workplace takes time, and for us the time is now. 
Senior leaders at GitLab are strongly engaged in DIB initiatives and have committed to allyship, but we recognize that change is happening too slowly.\n\nWe recognize that the Black community in particular is hurting right now. We see that the tech industry leaves too many talented people behind, or pushes them out due to unfair and noninclusive workplace practices. At GitLab everyone can contribute, and we want those contributions to be seen, heard, and felt at all levels of the company, by everyone in the company.\n\n_In our next blog post, we’ll dive into some of the challenges with diversity, inclusion, and belonging in the tech industry as a whole, and share some recommendations on how to overcome them._\n",[9],{"slug":5221,"featured":6,"template":680},"our-journey-to-a-diverse-and-inclusive-workplace","content:en-us:blog:our-journey-to-a-diverse-and-inclusive-workplace.yml","Our Journey To A Diverse And Inclusive Workplace","en-us/blog/our-journey-to-a-diverse-and-inclusive-workplace.yml","en-us/blog/our-journey-to-a-diverse-and-inclusive-workplace",{"_path":5227,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5228,"content":5234,"config":5241,"_id":5243,"_type":14,"title":5244,"_source":16,"_file":5245,"_stem":5246,"_extension":19},"/en-us/blog/our-step-by-step-guide-to-evaluating-runtime-security-tools",{"title":5229,"description":5230,"ogTitle":5229,"ogDescription":5230,"noIndex":6,"ogImage":5231,"ogUrl":5232,"ogSiteName":667,"ogType":668,"canonicalUrls":5232,"schema":5233},"Our step-by-step guide to evaluating runtime security tools","Key learnings from the GitLab Security team’s runtime security tool evaluation on Kubernetes clusters and Linux servers using real-world attack 
simulations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097534/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_1097303277_6gTk7M1DNx0tFuovupVFB1_1750097534344.jpg","https://about.gitlab.com/blog/our-step-by-step-guide-to-evaluating-runtime-security-tools","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Our step-by-step guide to evaluating runtime security tools\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hiroki Suezawa\"},{\"@type\":\"Person\",\"name\":\"Mitra Jozenazemian\"}],\n        \"datePublished\": \"2025-05-13\",\n      }",{"title":5229,"description":5230,"authors":5235,"heroImage":5231,"date":5238,"body":5239,"category":125,"tags":5240},[5236,5237],"Hiroki Suezawa","Mitra Jozenazemian","2025-05-13","Choosing the right runtime security tool is critical for protecting modern cloud-native environments.  We recently undertook a rigorous evaluation process using real-world attack simulations on our Kubernetes clusters and Linux servers. Why? Because traditional cloud audit logs do not provide enough detail, leaving critical gaps in threat detection, incident response, and forensic analysis. Our evaluation meticulously examined each critical stage from initial access to lateral movement and data exfiltration.\n\nWhile we won't be naming the specific vendor in this post, we want to share our detailed methodology and key learnings, providing a blueprint you can adapt for your own security tool evaluations.\n\n## Why are runtime security tools necessary?\n\nWithout runtime security tools, detecting “suspicious activities” and understanding “what actually happened” during an attack can become extremely challenging.\n\n### Limitations of cloud audit logs\n\n- **Lack of runtime details**  \n  Cloud audit logs primarily record operations and data access within the cloud. 
However, they do not capture runtime-level activities on systems such as Kubernetes servers – overlooking fine-grained command executions, process behaviors, and transient network activities.  \n\n- **Gaps in investigation and forensics**  \n    In Kubernetes environments, the absence of continuous, real-time logging can lead to the loss of critical activity records once a container terminates.\n\nAlthough well-known open-source runtime security tools are available, we decided to evaluate a commercial product to assess additional capabilities and enterprise-level support through attack simulation testing.\n\n### The role and purpose of runtime security tools\n\nRuntime security tools address these cloud audit log limitations by continuously monitoring systems in real time, offering the following functionalities:\n\n- **Threat detection**  \n  They monitor command executions, system calls, and network events in real-time to instantly detect abnormal behaviors, which enables the security team to respond rapidly. While some public cloud providers now offer limited runtime monitoring capabilities, these native solutions typically lack the depth and comprehensive coverage of dedicated security tools.  \n\n- **Incident response**  \n  By maintaining detailed chronological records of system activities, these tools provide security teams with the evidence needed to reconstruct attack timelines, determine the full scope of compromise, and conduct thorough forensic investigations after an incident occurs.  \n\n- **Scalability in investigations**  \n  Unlike traditional endpoint-by-endpoint forensic analysis, runtime security tools allow teams to collect, store, and analyze data centrally across the entire environment. This enables the efficient investigation of incidents without manually correlating disparate data sources.  
\n\n(**Note:** Products that also offer container information or server vulnerability monitoring are outside the scope of this discussion.)\n\n## Key evaluation points\n\nOur primary objective in evaluating a runtime security tool was to determine its effectiveness in real-world security investigations. While evaluations often focus on the volume of detections or overall coverage, in actual operations, an overload of false positives – or tens of alerts for a single attack chain – can paralyze incident response teams. Therefore, our in-depth investigation centered on whether the tool could be used to support security operations with understanding and responding to actual attacks.\n\n- **Detection capability**  \n\n  - **Built-in rule**  \n    We assessed whether the built-in rule sets could effectively detect a variety of attack techniques and provide the necessary detail for accurate detection.\n\n  - **Custom detection capabilities**  \n    We evaluated the ease with which additional rules could be integrated and considered the quality of telemetry data delivered by the product, which enabled us to build our own monitoring solutions leveraging our unique understanding of our environment.\n\n  - **Alert quality**  \n    We also verified the rate of false positives. We confirmed that it effectively focuses on genuine security threats requiring action while minimizing noise that could cause alert fatigue.\n\n- **Incident response**  \n\n  - **Richness of logs**  \n    We evaluated whether the logs capture sufficient details – including executed commands, network connections, DNS queries, and process information – to fully reconstruct the incident. The ability to piece together the entire attack scenario and determine the full impact is crucial during incident response.  \n\n  - **Log searchability**  \n    We assessed how effectively the tool allowed us to search, filter, and correlate events across multiple systems. 
The ability to quickly query massive volumes of data is essential for timely investigations during security incidents. \n\n## Evaluation process\n\nWe divided our evaluation process into four major phases:\n\n1. **Development of attack scenarios**  \n   We designed scenarios that mimicked real-world attack flows. These scenarios, developed in collaboration with our Red Team, included the following elements:  \n   - attacks exploiting GitLab-specific vulnerabilities (e.g., CVE-2021-22205)  \n   - attacks leveraging the compromise of developer laptops  \n   - detailed step-by-step attack procedures  \n2. **Infrastructure setup**  \n   We deployed two parallel environments:  \n   - Kubernetes environment  \n   - Virtual machine (VM) environment \n\n   We installed an older version of GitLab to test known vulnerabilities and carried out similar evaluation flows in both the Kubernetes and VM environments.\n\n3. **Execution of attacks**  \n   We executed the attack flow for each scenario and meticulously recorded the timeline – from initial access to lateral movement and data exfiltration.  \n\n4. **Analysis of results**  \n   We conducted a comprehensive evaluation of detection capabilities, log richness, and areas for improvement, clearly outlining the strengths and weaknesses of the tools.\n\n### Attack scenarios\n\n**Scenario 1: Exploitation of a known GitLab vulnerability**\n\n![Scenario 1: Exploitation of a known GitLab vulnerability](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097560/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097560795.png)\n\n- **Attack flow**  \n  1. **Initial access**  \n     We simulated an attack by exploiting CVE-2021-22205, a known GitLab vulnerability that allows remote code execution. This granted us unauthorized access to the target system.  \n  2. **Command execution**  \n     After gaining access, we executed a reverse shell to interact remotely with the compromised machine and take control.  
\n  3. **Deployment of a C2 agent**  \n     We installed a Command and Control (C2) agent to evaluate persistence techniques, enabling us to execute further commands and manage the system remotely.  \n  4. **Lateral movement**  \n     We then moved laterally within the environment, accessing Kubernetes API secrets and PostgreSQL databases.  \n  5. **Data exfiltration**  \n     We exfiltrated sensitive data via a dedicated C2 channel.\n\nThe following table summarizes the attack techniques used at each phase:\n\n| Initial access | Command and control | Enumeration | Credential access | Lateral movement | Collection | Exfiltration |\n| :---- | :---- | :---- | :---- | :---- | :---- | :---- |\n| Exploit GitLab application using known RCE vulnerability | Execute known reverse shell command | Harvesting info on the box | Get environment variables | Get secret from Kubernetes API | Get data from Cloud Storage | Exfiltration over C2 channel |\n|  | Install post-exploitation C2 agent |  | Get K8s token | Access to database | DNS exfiltration |  |\n|  | SOCKS proxy |  | Get cloud token via Cloud metadata server |  |  |  |\n\n\u003Cbr>\u003C/br>\n\n**Scenario 2: Compromise of a developer’s laptop**\n\n![Scenario 2: Compromise of a developer’s laptop](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097561/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097560796.png)\n\n- **Attack flow**  \n  1. **Initial compromise**   \n     We simulated an attacker compromising a developer’s laptop and abusing legitimate credentials to gain unauthorized access to internal resources.  \n  2. **Privilege escalation**  \n     Using the compromised credentials, we escalated privileges within the Kubernetes environment. \n  3. **Container manipulation**  \n     We deployed a privileged container to extract sensitive information.  \n  4. 
**Data exfiltration and persistence**  \n     We exfiltrated sensitive data while maintaining persistent access.\n\n      The following table summarizes the attack techniques used at each phase:\n\n| Initial access | Execution | Privilege escalation | Credential access | Lateral movement | Exfiltration |\n| :---- | :---- | :---- | :---- | :---- | :---- |\n| Valid account (kubectl) | Create a new container | Create a privileged container | Get K8s secrets via privilege of the node | Enter a container in the same node | Upload credential data to the attacker’s server |\n|  |  |  | Get an environment variable in the containers via `crictl` command on the node |  |  |\n\n\u003Cbr>\u003C/br>\n\n### Execution of the attacks\nDuring the execution of the attack scenarios, we followed these processes to obtain detailed records:\n\n- **Verification of detections:** We confirmed whether each attack command was detected and if the key points of each scenario were properly flagged.\n\n- **Timeline recording:** Every event was logged in sequence to assess how well command executions and network communications were captured.\n\n- **Scoring and analysis:** We scored each event based on detection effectiveness to quantitatively evaluate the tool’s performance.\n\n## What we learned\n\n### Don't overestimate – test commercial products yourself\n\n- **Identifying and addressing detection gaps (collaboration with vendors)**  \n  Our evaluation revealed that several critical scenarios and events were not detected or not logged. Consequently, we held meetings with the vendor and submitted multiple improvement requests. As a result, the vendor enhanced the product by adding new features and improving detection capabilities, with many issues identified during our evaluation subsequently addressed.  \n- **Understanding the limitations**  \n  Many modern runtime security tools use eBPF to monitor Linux system calls for detection. 
However, because commands executed within a C2 framework do not generate new processes, tracing these attack events proved challenging.  \n\n- **Recognizing tool boundaries**  \n  Our findings highlighted that, during incident response, relying solely on runtime security tools is insufficient. It is essential to combine them with other logs, such as Kubernetes audit logs and cloud logs, to gain a comprehensive view.\n\n### The importance of continuous runtime event logging in Kubernetes\n\nIn Kubernetes environments, there is a risk of losing forensic data when containers terminate, making continuous logging indispensable. Our evaluation confirmed that establishing a scalable, persistent logging infrastructure is crucial. Without proper runtime security tools, a significant amount of critical information could be lost post-attack.\n\n## Summary\n\nWe do not simply install security tools – we evaluate their utility to help ensure that our customers can safely use GitLab.com. Thorough product assessments like the one outlined above not only reveal unique use cases and areas for improvement that vendors might overlooks, but also provide valuable insights that benefit both the vendor and internal teams in organizing how the tool is best utilized.\n",[720,1298,9],{"slug":5242,"featured":6,"template":680},"our-step-by-step-guide-to-evaluating-runtime-security-tools","content:en-us:blog:our-step-by-step-guide-to-evaluating-runtime-security-tools.yml","Our Step By Step Guide To Evaluating Runtime Security 
Tools","en-us/blog/our-step-by-step-guide-to-evaluating-runtime-security-tools.yml","en-us/blog/our-step-by-step-guide-to-evaluating-runtime-security-tools",{"_path":5248,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5249,"content":5255,"config":5261,"_id":5263,"_type":14,"title":5264,"_source":16,"_file":5265,"_stem":5266,"_extension":19},"/en-us/blog/parallels-between-all-remote-and-cloud-computing",{"title":5250,"description":5251,"ogTitle":5250,"ogDescription":5251,"noIndex":6,"ogImage":5252,"ogUrl":5253,"ogSiteName":667,"ogType":668,"canonicalUrls":5253,"schema":5254},"The parallels between all-remote and cloud computing","The rise of the remote workplace has many parallels with the rise of cloud computing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673019/Blog/Hero%20Images/vintage-keyboards.jpg","https://about.gitlab.com/blog/parallels-between-all-remote-and-cloud-computing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The parallels between all-remote and cloud computing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joyce Tompsett\"}],\n        \"datePublished\": \"2019-10-29\",\n      }",{"title":5250,"description":5251,"authors":5256,"heroImage":5252,"date":5258,"body":5259,"category":808,"tags":5260},[5257],"Joyce Tompsett","2019-10-29","\nAs a GitLab team member, I’m frequently asked what it’s like to work in an [all-remote company](/company/culture/all-remote/). Folks are curious – they like the idea of being able to work from home, but the idea of a company that is *fully* remote is a strange and [wondrous thing](/blog/all-remote-is-for-everyone/). Occasionally, I also encounter people for whom remote is an inconceivable concept. 
They argue it cannot be done, and won’t work, despite the fact that we (and a [growing list of others](/company/culture/all-remote/jobs/)) continue to thrive as a successful all-remote organization.\n\nThe rise of the remote workplace draws many parallels to the rise of [cloud computing](/blog/google-cloud-next-anthos-kubernetes/).\n\n## Bringing cloud computing to the mainstream\n\nWhile many were comfortable with the idea of network computing for a long time, the notion of cloud computing started to appear with more frequency at the turn of the millennium, championed by companies like Google and Amazon.\n\nThe notion behind cloud computing was that users could access their files, programs and even their compute resources over the network, and potentially from somewhere that was entirely removed from their physical location. There was no physical data center specific to that organization. The capability offered was important but the physical location of the resources was less relevant, as long as it met the users’ requirements.\n\nInitially, those requirements were challenging. Performance, reliability, and security were closely critiqued, but the *primary* challenge for humans was this: They would have to trust something they could not physically touch.\n\nAs confidence and familiarity with cloud computing increased, software as a service (SaaS) companies emerged. Salesforce and Workday were designed to run in the cloud from inception – and as they became successful, a bevy of SaaS applications emerged. 
Many companies, GitLab included, offer options for both on-premises and SaaS versions of their applications, and discussions in data centers are about migrating or modernizing legacy mission-critical applications to a cloud environment – a once unthinkable idea.\n\nCloud, once scoffed at by many, is now an expected part of most firms’ [strategic technology portfolio](/blog/kubernetes-chat-with-kelsey-hightower/).\n\n![Focus on outputs, not inputs such as being seen in an office](https://about.gitlab.com/images/blogimages/working-remotely-el-salvador.jpg){: .shadow.medium.center}\nFocus on outputs, not inputs such as being seen in an office.\n{: .note.text-center}\n\n## The evolution of the remote workplace\n\nA similar progression is occurring with remote work. Working off-premises has occurred for years, but it was typically reserved for certain positions or types of companies, and certainly was not a mainstream option.\n\nMany companies that offer remote work are [hybrid-remote](/company/culture/all-remote/hybrid-remote/), where an employee may work remotely, but the bulk of the company reports to a physical infrastructure, either centralized or distributed. The rise of [all-remote companies](/company/culture/all-remote/) such as GitLab is analogous to the rise of cloud-based companies such as Salesforce or Workday. 
We are starting to see other all-remote companies form and at GitLab we expect all-remote will become more common as we develop [best practices](/company/culture/all-remote/meetings/) and evolve more [efficient ways](/company/culture/all-remote/management/) of working remotely.\n\nWe believe that [all-remote is the future of work](/blog/all-remote-is-for-everyone/) – that it will be as likely that an organization is remote as not — particularly if physical manufacturing isn’t involved.\n\n## What all-remote and cloud computing have in common\n\nIn addition to the similarities in how they evolved, many of the benefits of cloud computing have analogues to those of remote work. Some of the cloud benefits we also see with remote working include:\n\n1. Increased agility: Remote work increases an organization’s flexibility to add, expand, or deploy employees in line with the company’s needs.\n1. Cost reductions: Many capital expenditures arguably become operational in all-remote workplaces. The lack of physical infrastructure to lease or buy and maintain offers cost savings to companies. This also lowers barriers to entry for new companies entering the market.\n1. Employee (compute) location independence: Distributed workplaces enables companies to attract and hire the best talent regardless of location, just as users can connect to the company from anywhere they have adequate Internet access.\n1. Productivity often increases as [asynchronous communication](/company/culture/all-remote/informal-communication/) allows multiple employees to work on the same data simultaneously, rather than waiting for synchronous meetings that function like bottlenecks.\n\nThis innovative deployment of people is strikingly similar to the innovative deployment of cloud computing. 
The key challenge is the same for cloud and remote: Organizations need to [trust](/company/culture/all-remote/management/) the model and realize the [benefits](/company/culture/all-remote/benefits/) for themselves.
Here's how they work.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667040/Blog/Hero%20Images/parent_pipeline_graph.png","https://about.gitlab.com/blog/parent-child-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get started with Parent-child pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Ward\"}],\n        \"datePublished\": \"2020-04-24\",\n      }",{"title":5270,"description":5271,"authors":5276,"heroImage":5272,"date":5277,"body":5278,"category":743,"tags":5279},[1066],"2020-04-24","As applications and their repository structures grow in complexity, a repository `.gitlab-ci.yml` file becomes difficult to manage, collaborate on, and see benefit from. This problem is especially true for the increasingly popular \"[monorepo](https://en.wikipedia.org/wiki/Monorepo)\" pattern, where teams keep code for multiple related services in one repository. Currently, when using this pattern, developers all use the same `.gitlab-ci.yml` file to trigger different automated processes for different application components, likely causing merge conflicts, and productivity slowdown, while teams wait for \"their part\" of a pipeline to run and complete.\n\nTo help large and complex projects manage their automated workflows, we've added two new features to make pipelines even more powerful: Parent-child pipelines, and the ability to generate pipeline configuration files dynamically.\n\n## Meet Parent-child pipelines\n\nSo, how do you solve the pain of many teams collaborating on many inter-related services in the same repository? \nLet me introduce you to [Parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html), released with with [GitLab 12.7](/releases/2020/01/22/gitlab-12-7-released/#parent-child-pipelines). 
Splitting complex pipelines into multiple pipelines with a parent-child relationship can improve performance by allowing child pipelines to run concurrently. This relationship also enables you to compartmentalize configuration and visualization into different files and views. \n\n### Creating a child pipeline\n\nYou trigger a child pipeline configuration file from a parent by including it with the `include` key as a parameter to the `trigger` key. You can name the child pipeline file whatever you want, but it still needs to be valid YAML.\n\nThe parent configuration below triggers two further child pipelines that build the Windows and Linux version of a C++ application. \n\n```cpp\n#include \u003Ciostream>\nint main()\n{\n  std::cout \u003C\u003C \"Hello GitLab!\" \u003C\u003C std::endl;\n  return 0;\n}\n```\n\nThe setup is a simple one but hopefully illustrates what is possible.\n\n```yaml\nstages:\n  - triggers\n\nbuild_windows:\n  stage: triggers\n  trigger:\n    include: .win-gitlab-ci.yml\n  rules:\n    - changes:\n      - cpp_app/*\n\nbuild_linux:\n  stage: triggers\n  trigger:\n    include: .linux-gitlab-ci.yml\n  rules:\n    - changes:\n      - cpp_app/*\n```\n\nThe important values are the `trigger` keys which define the child configuration file to run, and the parent pipeline continues to run after triggering it. You can use all the normal sub-methods of `include` to use local, remote, or template config files, up to a maximum of three child pipelines.\n\nAnother useful pattern to use for parent-child pipelines is a `rules` key to trigger a child pipeline under certain conditions. 
In the example above, the child pipeline only triggers when changes are made to files in the _cpp_app_ folder.\n\nThe Windows build child pipeline (`.win-gitlab-ci.yml`) has the following configuration, and unless you want to trigger a further child pipeline, it follows a standard configuration format:\n\n```yaml\nimage: gcc\nbuild:\n  stage: build\n  before_script:\n    - apt update && apt-get install -y mingw-w64\n  script:\n    - x86_64-w64-mingw32-g++ cpp_app/hello-gitlab.cpp -o helloGitLab.exe\n  artifacts:\n    paths:\n      - helloGitLab.exe\n```\n\nDon't forget the `-y` argument as part of the `apt-get install` command, or your jobs will be stuck waiting for user input.\n\nThe Linux build child pipeline (`.linux-gitlab-ci.yml`) has the following configuration, and unless you want to trigger a further child pipeline, it follows a standard configuration format:\n\n```yaml\nimage: gcc\nbuild:\n  stage: build\n  script:\n    - g++ cpp_app/hello-gitlab.cpp -o helloGitLab\n  artifacts:\n    paths:\n      - helloGitLab\n```\n\nIn both cases, the child pipeline generates an artifact you can download under the _Job artifacts_ section of the Job result screen.\n\nPush all the files you created to a new branch, and for the pipeline result, you should see the two jobs and their subsequent child jobs.\n\n![Parent-child pipeline result](https://about.gitlab.com/images/blogimages/non-dynamic-pipelines.png){: .shadow.medium.center}\nThe result of a parent-child pipeline\n{: .note.text-center}\n\n## Dynamically generating pipelines\n\nTaking Parent-child pipelines even further, you can also dynamically generate the child configuration files from the parent pipeline. 
Doing so keeps repositories clean of scattered pipeline configuration files and allows you to generate configuration in your application, pass variables to those files, and much more.\n\nLet's start with the parent pipeline configuration file:\n\n```yaml\nstages:\n  - setup\n  - triggers\n\ngenerate-config:\n  stage: setup\n  script:\n    - ./write-config.rb\n    - git status\n    - cat .linux-gitlab-ci.yml\n    - cat .win-gitlab-ci.yml\n  artifacts:\n    paths:\n      - .linux-gitlab-ci.yml\n      - .win-gitlab-ci.yml\n\ntrigger-linux-build:\n  stage: triggers\n  trigger:\n    include:\n      - artifact: .linux-gitlab-ci.yml\n        job: generate-config\n\ntrigger-win-build:\n  stage: triggers\n  trigger:\n    include:\n      - artifact: .win-gitlab-ci.yml\n        job: generate-config\n```\n\nDuring our self-defined `setup` stage the pipeline runs the `write-config.rb` script. For this article, it's a Ruby script that writes the child pipeline config files, but you can use any scripting language. The child pipeline config files are the same as those in the non-dynamic example above. 
We use `artifacts` to save the generated child configuration files for this CI run, making them available for use in the child pipelines stages.\n\nAs the Ruby script is generating YAML, make sure the indentation is correct, or the pipeline jobs will fail.\n\n```ruby\n#!/usr/bin/env ruby\n\nlinux_build = \u003C\u003C~YML\n    image: gcc\n    build:\n        stage: build\n        script:\n            - g++ cpp_app/hello-gitlab.cpp -o helloGitLab\n        artifacts:\n            paths:\n                - helloGitLab\nYML\n\nwin_build = \u003C\u003C~YML\n    image: gcc\n    build:\n        stage: build\n        before_script:\n            - apt update && apt-get install -y mingw-w64\n        script:\n            - x86_64-w64-mingw32-g++ cpp_app/hello-gitlab.cpp -o helloGitLab.exe\n        artifacts:\n            paths:\n                - helloGitLab.exe\nYML\n\nFile.open('.linux-gitlab-ci.yml', 'w'){ |f| f.write(linux_build)}\nFile.open('.win-gitlab-ci.yml', 'w'){ |f| f.write(win_build)}\n```\n\nThen in the `triggers` stage, the parent pipeline runs the generated child pipelines much as in the non-dynamic version of this example but instead using the saved `artifact` files, and the specified `job`.\n\nPush all the files you created to a new branch, and for the pipeline result, you should see the three jobs (with one connecting to the two others) and the subsequent two children.\n\n![Dynamic parent-child pipeline result](https://about.gitlab.com/images/blogimages/dynamic-pipelines.png){: .shadow.medium.center}\nThe result of a dynamic parent-child pipeline\n{: .note.text-center}\n\n## Pipeline flexibility\n\nThis blog post showed some simple examples to give you an idea of what you can now accomplish with pipelines. 
With one parent, multiple children, and the ability to generate configuration dynamically, we hope you find all the tools you need to [build CI/CD workflows](/topics/ci-cd/).\n\nYou can also watch a demo of Parent-child pipelines below:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/n8KpBSqZNbk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[109,677,9,1294],{"slug":5281,"featured":6,"template":680},"parent-child-pipelines","content:en-us:blog:parent-child-pipelines.yml","Parent Child Pipelines","en-us/blog/parent-child-pipelines.yml","en-us/blog/parent-child-pipelines",{"_path":5287,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5288,"content":5293,"config":5298,"_id":5300,"_type":14,"title":5301,"_source":16,"_file":5302,"_stem":5303,"_extension":19},"/en-us/blog/patch-files-for-code-review",{"title":5289,"description":5290,"ogTitle":5289,"ogDescription":5290,"noIndex":6,"ogImage":4002,"ogUrl":5291,"ogSiteName":667,"ogType":668,"canonicalUrls":5291,"schema":5292},"How patch files can transform how you review code","We explain how to use patch files for better code review.","https://about.gitlab.com/blog/patch-files-for-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How patch files can transform how you review code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2021-03-15\",\n      }",{"title":5289,"description":5290,"authors":5294,"heroImage":4002,"date":5295,"body":5296,"category":743,"tags":5297},[3556],"2021-03-15","\n\nThis post is adapted from a [GitLab Unfiltered blog post](/blog/better-code-reviews/) written by me, [David O'Regan](/company/team/#oregand). 
In [part one of our series](/blog/tips-for-better-code-review/), we explain the importance of fairness and empathetic thinking in code reviews.\n{: .note .alert-info .text-center}\n\n## Patch files\n\nWanna know a `git secret`? [Patch files](https://git-scm.com/docs/git-format-patch) are magic when it comes to code reviews. A [patch is a text file whose contents are similar to Git diff](https://www.tutorialspoint.com/git/git_patch_operation.htm) but along with code it contains metadata about commits, for example, a patch file will include commit ID, date, commit message, etc. We can create a patch from commits and other people can apply them to their repository.\n\n## How to use a patch file\n\nA patch file is useful for code review because it allows the reviewer to create an actionable piece of code that shares their thoughts with the MR author. The code author can then apply the suggestion directly to their merge request. Patch files foster collaboration because it essentially creates a paired programming session in the review process.\n\nThis lets other people check your changes in the git patch files for any corrections that need to be made before the changes truly go live. After everything has been checked and corrections made, the changes can be pushed to the main branch of the repository. 
\n\nOne of the [better examples of a simple patch file in action](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/31686#note_341534370) comes from [Denys Mishunov](/company/team/#dmishunov), staff frontend engineer on the Create team.\n\n```bash\nIndex: app/assets/javascripts/projects/commits/components/author_select.vue\nIDEA additional info:\nSubsystem: com.intellij.openapi.diff.impl.patch.CharsetEP\n\u003C+>UTF-8\n===================================================================\n--- app/assets/javascripts/projects/commits/components/author_select.vue\t(revision 697d0734f1ae469a9a3522838e36b435d7cdf0be)\n+++ app/assets/javascripts/projects/commits/components/author_select.vue\t(date 1589356024033)\n@@ -110,6 +110,7 @@\n     \u003Cgl-new-dropdown\n       :text=\"dropdownText\"\n       :disabled=\"hasSearchParam\"\n+      toggle-class=\"gl-py-3\"\n       class=\"gl-dropdown w-100 mt-2 mt-sm-0\"\n     >\n       \u003Cgl-new-dropdown-header>\n\n```\n\nTo generate this suggestion, Denys pulled down the code he was reviewing and was able to offer a code solution based on his own testing. The patch file contains lots of valuable information, including the file affected, the date the revision was made, and the tool he used to generate the patch.\n\n## How to create a patch file\n\nYou can make a patch file using a web editor or with the command line. Read on to see how to create a patch file in GitLab both ways.\n\n### Patch files using a web editor\n\nIf you are rocking a nice fancy IDE or text editor, here's some good news: Most support patch files via plugins or out of the box. 
Here are some links to documentation on how to use patch files with different plugins: [VSCode](https://github.com/paragdiwan/vscode-git-patch), [Webstorm](https://www.jetbrains.com/help/webstorm/using-patches.html), [Atom](https://atom.io/packages/git-plus), and [Vim](https://vim.fandom.com/wiki/How_to_make_and_submit_a_patch).\n\n### Patch files using the command line\n\nOK command line users, you’ve made some commits, here’s your `git log`:\n\n```\ngit log --pretty=oneline -3\n\n* da33d1k - (feature_branch) Reviewer Commit 1 (7 minutes ago)\n\n* 66a84ah - (feature_branch) Developer 1 Commit (12 minutes ago)\n\n* adsc8cd - (REL-0.5.0, origin/master, origin/HEAD, master) Release 13.0 (2 weeks ago)\n\n``` javascript\n```\n\nThis command creates a new file, `reviewer_commit.patch`, with all changes from the reviewer's latest commit against the feature branch:\n\n```\n```git format-patch HEAD~1 --stdout > reviewer_commit.patch```\n\n### How to apply the patch\n\nFirst, take a look at what changes are in the patch. You can do this easily with `git apply`:\n\n```git apply --stat reviewer_commit.patch```\n```\n\nHeads up: Despite the name, this command won't actually apply the patch. It will just show the statistics about what the patch will do.\n\nSo now that we've had a look, let's test it first because not all patches are created equal:\n\n```\n```git apply --check reviewer_commit.patch```\n\nIf there are no errors we can apply this patch without worrying.\n\nTo apply the patch, you should use `git am` instead of `git apply`. The reason: `git am` allows you to sign off an applied patch with the reviewer's stamp.\n\ngit am --signoff &lt; reviewer_commit.patch\n\nApplying: Reviewer Commit 1\n\n``` javascript\n```\n\nNow run `git log` and you can see the `Signed-off-by` tag in the commit message. 
This tag makes it very easy to understand how this commit ended up in the codebase.\n\n### The benefits of patch files for code reviews\n\nSo now that you know how to make a shiny patch file, why would you use patch files as part of a code review process? There are a few reasons you might consider offering a patch file for a change you feel strongly about:\n\n*   It communicates you have invested a large amount of effort into understanding the author's solution and reasoning\n*   It demonstrates a passion for using teamwork to arrive at the best solution\n*   It shows the reviewer is willing to accept responsibility for this merge beyond just reading the code\n\nThere are a few alternatives to patch files for code reviews. GitLab has a [suggestion feature which allows the reviewer to suggest code changes using Markdown in a merge request](https://docs.gitlab.com/ee/user/discussions/#suggest-changes). The other option is to write raw code in Markdown right in the comment box. The downside is the reviewer doesn't have the option to test the code they are writing, making both of these options prone to error.\n\nIt is better to use a patch file because it involves the code reviewer in the review process in a collaborative way by default. In order to generate a patch, the reviewer must pull down the code, write the patch, test the change, and then submit it for the code author's consideration. 
Patch files increase the visibility for the reviewer and offer a fully collaborative experience for the code author.\n\nSome people might argue patch files are a cheeky way for a reviewer to force a change they would rather see make it into the codebase, but I believe that anyone who has taken the time to check out a branch, run the project, implement a change, and then submits that change back for a discussion is fully embracing collaboration.\n\nGitLab is evaluating whether to make patch files [part of the code review and merge request workflow](https://gitlab.com/gitlab-org/gitlab/-/issues/220044).\n\nLearn more about [the role of fairness in code review in part one of our blog series](/blog/tips-for-better-code-review/). Up next we explain why shipping small merge requests is in line with our iteration value.\n",[4454,9],{"slug":5299,"featured":6,"template":680},"patch-files-for-code-review","content:en-us:blog:patch-files-for-code-review.yml","Patch Files For Code Review","en-us/blog/patch-files-for-code-review.yml","en-us/blog/patch-files-for-code-review",{"_path":5305,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5306,"content":5311,"config":5317,"_id":5319,"_type":14,"title":5320,"_source":16,"_file":5321,"_stem":5322,"_extension":19},"/en-us/blog/path-to-decomposing-gitlab-database-part1",{"title":5307,"description":5308,"ogTitle":5307,"ogDescription":5308,"noIndex":6,"ogImage":2010,"ogUrl":5309,"ogSiteName":667,"ogType":668,"canonicalUrls":5309,"schema":5310},"Decomposing the GitLab backend database, Part 1: Designing and planning","A technical summary of the yearlong project to decompose GitLab's Postgres database. 
This first part focuses on the initial designing and planning of the project.","https://about.gitlab.com/blog/path-to-decomposing-gitlab-database-part1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Decomposing the GitLab backend database, Part 1: Designing and planning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dylan Griffith\"}],\n        \"datePublished\": \"2022-08-04\",\n      }",{"title":5307,"description":5308,"authors":5312,"heroImage":2010,"date":5314,"body":5315,"category":743,"tags":5316},[5313],"Dylan Griffith","2022-08-04","\nRecently we finished [migrating the GitLab.com monolithic Postgres database to two independent databases: `Main` and `CI`](/blog/splitting-database-into-main-and-ci/). After we decided how to split things up, the project took about a year to complete.\n\nThis blog post on decomposing the GitLab backend database is part one in a three-part series. The posts give technical details about many of the challenges we had to\novercome, as well as links to issues, merge requests, epics, and developer-facing documentation.\nOur hope is that you can get as much detail as you want about how we work on complex projects at GitLab.\n\nWe highlight the most interesting details, but anyone undertaking a similar\nproject might learn a lot from seeing all\nthe different trade-offs we evaluated along the way.\n\n- \"Decomposing the GitLab backend database, Part 1\" focuses on the initial design and planning of the project.\n- [Part 2](/blog/path-to-decomposing-gitlab-database-part2/) focuses on the\nexecution of the final migration.\n- [Part 3](/blog/path-to-decomposing-gitlab-database-part3/) highlights some interesting technical challenges we had to solve along the way, as well as some surprises.\n\n## How it began\n\nBack in early 2021, GitLab formed a \"database sharding\" team in an effort to\ndeal with our ever-growing monolithic Postgres 
database. This database stored\nalmost all the data generated by GitLab.com users, excluding git data and some other\nsmaller things.\n\nAs this database grew over time, it became a common source of\nincidents for GitLab. We knew that eventually we had to move away from a single\nPostgres database. We were already approaching the limits of what we could do\non a single VM with 96 vCPU and continually trying to vertically scale this VM\nwould eventually not be possible. Even if we could vertically scale forever,\nmanaging such a large Postgres database just becomes more and more difficult.\n\nEven though our database architecture has been monolithic for a long time, we already made use of many scaling techniques, including:\n\n- Using Patroni to have a pool of replicas for read-only traffic\n- Using PGBouncer for pooling the vast number of connections across our application fleet\n\n![Database architecture before decomposition](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/phase0.png)\n\nThese approaches only got us so far and ultimately would never fix the scaling\nbottleneck of the number of writes that need to happen, because all writes need to\ngo to the primary database.\n\nThe original objective of the database sharding team was to find a viable way\nto horizontally shard the data in the database. We started with exploring\n[sharding by top-level namespace][sharding_by_top_level_namespace_poc_epic]. This approach had some very complicated problems to solve, because the application\nwas never designed to have strict tenancy boundaries around top-level\nnamespaces. We believe that ultimately this will be a good way to split and\nscale the database, but we needed a shorter term solution to our scaling\nproblems.\n\nThis is when we evaluated different ways to extract certain tables into a\nseparate database. 
This approach is often referred to as \"vertical\npartitioning\" or \"functional decomposition.\" We assumed this extraction would likely\nbe easier, as long as we found a set of tables with loose coupling to the rest\nof the database. We knew it would require us to remove all joins to the rest of the\ntables (more on that later).\n\n## Figuring out where most write activity occurs\n\nWe did [an analysis][analysis_of_decomposition_tables] of:\n\n- Where the bulk of our data was stored\n- The write traffic (since ultimately the number of writes was the thing we were trying to reduce)\n\nWe learned that CI tables (at the time) made up around 40% to 50% of our write traffic. This seemed like a\nperfect candidate, because splitting the database in half (by write traffic) would be\nthe optimal scaling step.\n\nWe analyzed the data by splitting the database the following ways:\n\n| Tables group   | DB size (GB) | DB size (%) | Reads/s   | Reads/s (%) | Writes/s | Writes/s (%) |\n|----------------|--------------|-------------|-----------|-------------|----------|--------------|\n| Webhook logs   | 2964.1       | 22.39%      | 52.5      | 0.00%       | 110.0    | 2.82%        |\n| Merge Requests | 2673.7       | 20.20%      | 126073.4  | 1.31%       | 795.4    | 20.40%       |\n| CI             | 4725.0       | 35.69%      | 1712843.8 | 17.87%      | 1909.2   | 48.98%       |\n| Rest           | 2876.3       | 21.73%      | 7748488.5 | 80.82%      | 1083.6   | 27.80%       |\n\nChoosing to split the CI tables from the database was partly based on instinct.\nWe knew the CI tables (particularly `ci_builds` and\nrelated metadata) were already some of the largest tables in our database. It\nwas also a convenient choice because the CI tables were already prefixed with\n`ci_`. In the end, we realized only three tables were CI tables that weren't\nprefixed with `ci_`. 
You can see the up-to-date list of tables and their respective\ndatabase in [`gitlab_schemas.yml`][gitlab_schemas_yml].\n\nThe next step was to see how viable it actually was.\n\n## Proving it can work\n\nThe [first proof-of-concept merge request][initial_poc_mr_for_ci_decomposition] was created\nin August 2021. The proof-of-concept process involved:\n\n- Separating the database and seeing what broke\n- Fixing blockers and marking todo's until we ended up with the application \"pretty much working\"\n\nWe never merged this proof of concept, but we progressively broke out changes into smaller merge requests\nor issues assigned to the appropriate teams to fix.\n\n![Screenshot of large proof-of-concept MR](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/poc-mr-scale.png)\n\n## Chasing a moving target\n\nWhen tackling a large-scale architecture change, you might find\nyourself chasing a moving target.\n\nTo split the database, we had to change the application. Our code depended on all\nthe tables being in a single database. These changes took almost a year.\n\nIn the meantime, the application was constantly evolving\nand growing, and with contributions from many engineers who weren't necessarily\nfamiliar with the CI decomposition project. This meant that we couldn't just\nstart fixing problems. We knew we would likely find new problems being\nintroduced at a faster rate than we could remove them.\n\nTo solve this problem, we took an approach that was inspired by\n[how we handle new RuboCop rules](https://docs.gitlab.com/ee/development/contributing/style_guides.html#resolving-rubocop-exceptions).\nThe idea is to implement static or dynamic analysis to detect these\nproblems. 
Then we use this information to generate an allowlist of exceptions.\nAfter we have this allowlist of exceptions, we prevent any new violations from being created\n(as any new violations will fail the pipeline).\n\nThe result was a clear list to work on and visibility into our progress.\n\nAs part of making the application compatible with CI decomposition, we needed to\nbuild the following:\n\n- [Multiple databases documentation][docs_multiple_databases] taught\n  developers how to write code that is compatible with multiple databases.\n- [Cross-join detection][mr_cross_join_detection] analyzed all SQL queries\n  and raised an error if the query spanned multiple databases.\n- [Cross-database transaction detection][mr_cross_db_transaction_detection]\n  analyzed all transactions and raised an error if queries were sent to two\n  different databases within the context of a single transaction.\n- [Query analyzer metrics][mr_query_analyzer_metrics] analyzed all SQL queries\n  and tracked the different databases that would be queried (based on table\n  names). These metrics, which were sampled at a rate of 1/10,000 queries, because they are\n  expensive to parse, were sent to Prometheus. We used this data to get a sense\n  of whether we were whittling down the list of cross-joins in production.\n  It also helped us catch code paths that weren't covered by tests but were\n  executed in production.\n- [A Rubocop rule for preventing the use of\n  `ActiveRecord::Base`][mr_rubocop_rule_ar_base] ensured that we always\n  used an explicit database connection for Main or CI.\n\n## Using Rails multiple database support\n\nWhen we began this project, there were many improvements being added to Rails to\nsupport multiple databases. 
We wanted to make use of as much of this Rails\nbuilt-in support as possible to minimize the amount of custom database\nconnection logic we had to maintain.\n\nOne considerable challenge with this was our existing\n[custom database load balancing logic](https://docs.gitlab.com/ee/administration/postgresql/database_load_balancing.html).\nThe development of this complex implementation spans a long period of time, and\nit was designed differently to how Rails connections were managed in the new\nmulti-database support.\n\nIn the end, we were able to use parts of Rails multiple database support, but\n[we still hope to one day remove our custom logic and only use what is supported by Rails][epic_to_move_to_native_rails_multiple_dataabase_support].\n\n## Implementing loose foreign keys\n\nThere were still some foreign keys that existed between CI and non-CI tables.\nWe needed a way to remove these keys but still keep the functionality of cascading\ndeletes.\n\nIn the end, [we implemented a solution][lfk_mr]\nwe call [\"loose foreign keys\"][lfk_docs]. This solution provides similar functionality and\nsupport for cascading `NULLIFY` or `DELETE` when a parent record is deleted in\nPostgres. It's implemented using Postgres on delete triggers, so it guarantees all\ndeletes (including bulk deletes) will be handled. 
The trigger writes to another\n\"queue\" table in Postgres, which then is picked up by a periodic Sidekiq worker\nto clean up all the impacted child records.\n\nWhen implementing this solution, we also considered the option of using\n[`ActiveRecord` `before_destroy` callbacks](https://apidock.com/rails/ActiveRecord/Callbacks/before_destroy).\nHowever they couldn't give us the same guarantees as Postgres foreign keys,\nbecause they can be intentionally or accidentally skipped.\n\nIn the end, the \"loose foreign keys\" solution also helped to solve another problem\nwe have, where very large cascading deletes cause timeouts and user experience issues.\nBecause it's asynchronous, we could easily control timing and batch sizes to never\nhave database timeouts and never overload the database with a single large\ndelete.\n\n## Mirroring namespaces and projects\n\nOne of the most difficult dependencies between CI and Main features in GitLab\nis how CI Runners are configured. Runners are assigned to projects and groups\nwhich then dictates which jobs they will run. This meant there were many join\nqueries from the `ci_runners` table to the `projects` and `namespaces` tables.\nWe solved most of these issues by refactoring our Rails code and queries, but\nsome proved very difficult to do efficiently.\n\nTo work around this issue, [we implemented][mr_namespace_project_mirroring] a mechanism to\n[mirror the relevant columns on `projects` and `namespaces` to the CI database][docs_ci_mirrored_tables].\n\nIt's not ideal to have to duplicate data that must be kept up-to-date like\nthis, but while we expected this may be necessary in a few places, it turns out\nthat we only ended up doing this for those two tables. 
All other joins could be\nhandled without mirroring.\n\nAn important part of our mirroring architecture is periodic\n[consistency checking][mr_namespace_project_mirroring_consistency_check].\nEvery time this process runs, it takes a batch of the mirrored rows and compares them\nwith the expected values. If there is a discrepancy, it schedules them to be fixed.\nAfter it's done with this batch, it updates a cursor in Redis to be used for the\nnext batch.\n\n## Creating a phased rollout strategy\n\nA key part of ensuring our live migration went as smooth as possible was by\nmaking it as small as possible. This was quite difficult as the migration from\n1 database to 2 databases is a discrete change that seems hard to break up into\nsmaller steps that can be rolled out individually.\n\nOne [early insight][initial_migration_plan_mr] was that we could actually reconfigure GitLab.com ahead of\ntime so that the Rails application behaved as though it was talking to two\nseparate databases long before we actually split the databases. Basically the\nidea was that the Rails processes already had two separate database connections,\nbut ultimately they were going to the same database. We could even break things\nout further since our read-only connections are designed to read from slightly\ndelayed replicas. So we could already have read-only connections going to the\nnewly created CI read-only replicas before the migration.\n\n![Database architecture before final migration step](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/phase4.png)\n\nThese insights led to our [seven-phase migration process][phased_migration_epic].\nThis process meant that by the time we got to the final migration on production\n(Phase 7), we were already incredibly confident that the application would work\nwith separate databases and the actual change being shipped was just trivial\nreconfiguration of a single database host. 
This also meant that all phases\n(except for Phase 7) had a very trivial rollback process, introduced very\nlittle risk of incident and could be shipped before we were finished with every\ncode change necessary to make the application support two databases.\n\nThe seven phases were:\n\n1. Deploy a Patroni cluster\n2. Configure Patroni standby cluster\n3. Serve CI reads from CI standby cluster\n4. Separate write connections for CI and Main (still going to the same primary host)\n5. Do a staging dry run and finishing the migration plan\n6. Validate metrics and additional logging\n7. Promote the CI database and send writes to it\n\n## Using labels to distribute work and prioritize\n\nNow that we had a clear set of phases we could prioritize our work. All issues\nwere assigned [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels)\nbased on the specific phase they corresponded to. Since the work spanned many\nteams in development and infrastructure, those teams could use the\nlabel to easily tell which issues needed to be worked on first. Additionally,\nsince we kept an up-to-date timeline of when we expected to ship each phase,\neach team could use the phase label to determine a rough deadline of when that\nwork should get done to not delay the project. Overall there were at least 193\nissues over all phases. Phase 1 and 2 were mostly infrastructure tasks tracked\nin a different group and with different labels, but the other phases contained\nthe bulk of the development team requirements:\n\n1. [8 Phase 3 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase3)\n1. [78 Phase 4 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase4)\n1. [7 Phase 5 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase5)\n1. 
[64 Phase 6 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase6)\n1. [34 Phase 7 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase7)\n\n## Continue reading\n\nYou can read more about the final migration process and results of the migration in [Part 2](/blog/path-to-decomposing-gitlab-database-part2/).\n\n[initial_poc_mr_for_ci_decomposition]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67486\n[initial_migration_plan_mr]: https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/84588\n[lfk_mr]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69165\n[lfk_docs]: https://docs.gitlab.com/ee/development/database/loose_foreign_keys.html\n[epic_to_move_to_native_rails_multiple_dataabase_support]: https://gitlab.com/gitlab-org/gitlab/-/issues/296870\n[phased_migration_epic]: https://gitlab.com/groups/gitlab-org/-/epics/6160\n[sharding_by_top_level_namespace_poc_epic]: https://gitlab.com/groups/gitlab-org/-/epics/5838\n[analysis_of_decomposition_tables]: https://gitlab.com/groups/gitlab-org/-/epics/5883#summary-of-impact\n[gitlab_schemas_yml]: https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/database/gitlab_schemas.yml\n[docs_ci_mirrored_tables]: https://docs.gitlab.com/ee/development/database/ci_mirrored_tables.html\n[mr_cross_join_detection]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68620\n[mr_cross_db_transaction_detection]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67213\n[mr_query_analyzer_metrics]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/73839\n[mr_rubocop_rule_ar_base]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/64937\n[mr_namespace_project_mirroring]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75517\n[mr_namespace_project_mirroring_consistency_check]: 
https://gitlab.com/gitlab-org/gitlab/-/merge_requests/81836\n[docs_multiple_databases]: https://docs.gitlab.com/ee/development/database/multiple_databases.html\n",[9,1698],{"slug":5318,"featured":6,"template":680},"path-to-decomposing-gitlab-database-part1","content:en-us:blog:path-to-decomposing-gitlab-database-part1.yml","Path To Decomposing Gitlab Database Part1","en-us/blog/path-to-decomposing-gitlab-database-part1.yml","en-us/blog/path-to-decomposing-gitlab-database-part1",{"_path":5324,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5325,"content":5330,"config":5334,"_id":5336,"_type":14,"title":5337,"_source":16,"_file":5338,"_stem":5339,"_extension":19},"/en-us/blog/path-to-decomposing-gitlab-database-part2",{"title":5326,"description":5327,"ogTitle":5326,"ogDescription":5327,"noIndex":6,"ogImage":2010,"ogUrl":5328,"ogSiteName":667,"ogType":668,"canonicalUrls":5328,"schema":5329},"Decomposing the GitLab backend database, Part 2: Final migration and results","This is the second in our three-part technical summary of the yearlong project to decompose GitLab's Postgres database.","https://about.gitlab.com/blog/path-to-decomposing-gitlab-database-part2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Decomposing the GitLab backend database, Part 2: Final migration and results\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dylan Griffith\"}],\n        \"datePublished\": \"2022-08-04\",\n      }",{"title":5326,"description":5327,"authors":5331,"heroImage":2010,"date":5314,"body":5332,"category":743,"tags":5333},[5313],"\n\n_This blog post is part 2 in a three-part series about decomposing the GitLab backend database. It focuses on the final migration\nprocess and highlights the results we achieved after the migration. 
If you want to read about the design and planning phase, check out [part 1](/blog/path-to-decomposing-gitlab-database-part1/)._\n\n## Deciding between zero downtime and full downtime\n\nEarly on in the project we thought it would be necessary for the migration to\nbe \"zero downtime\" or \"near-zero downtime\". We [came up with this plan][initial_migration_plan_mr]\nearly on which involved (in summary):\n1. The entire database would be replicated (including non-CI tables) using\n   Patroni cascading/standby replication to a dedicated CI Patroni cluster.\n   Replication only lags by at most a few seconds.\n2. Read traffic for CI tables could be split ahead of time to read from the CI\n   replicas.\n3. Write traffic would be split ahead of the migration into CI and Main by\n   sending these through separate dedicated PGBouncer proxies. Initially CI\n   writes still go to the Main database since the CI cluster is just a standby.\n   These proxies would be the thing we reconfigured during the live migration\n   to point at the CI cluster.\n4. At the time of migration we would pause writes to the CI tables by pausing\n   the CI PGBouncer.\n5. After pausing writes to the CI database we'd capture the current LSN\n   position in Postgres of the Main primary database (now expect no more writes\n   to CI tables to be possible).\n6. After that we wait until the CI database replication catches up to that\n   point.\n7. Then we promote the CI database to accept writes (remove the cascading\n   replication).\n8. Then we reconfigure writes to point to the CI database by updating the write\n   host in the CI PGBouncer.\n9. 
The migration is done.\n\n![Database architecture actual final migration step](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/phase4to7.png)\n\nThis approach (assuming that the CI replicas were only delayed by a few\nseconds) would mean that, at most, there would be a few seconds where CI writes\nmight result in errors and 500s for users. Many failures would likely already\nbe retried since much of CI write traffic goes via asynchronous (Sidekiq)\nprocesses that automatically retry.\n\nIn the end we didn't use this approach because:\n\n1. This approach didn't have an easy-to-implement rollback strategy. Data that\n   was written to CI tables during the migration would be lost if we rolled\n   back to just the Main database.\n2. The period of a few seconds where we expect to see some errors might make it\n   difficult for us to quickly determine the success or failure of the\n   migration.\n3. There was no hard business requirement to avoid downtime.\n\nThe [migration approach we ended up using][phase7_summary_epic] took two\nhours of downtime. We stopped all GitLab services that could read or write\nfrom the database. We also blocked user-level traffic at the CDN (Cloudflare) to allow us\nto do some automated and manual testing before opening traffic back up to\nusers. This allowed us to prepare a [slightly more straightforward rollback procedure][rollback_issue],\nwhich was:\n\n1. Reconfigure all read-only CI traffic back to the Main replicas\n2. Reconfigure all read-write CI traffic (via PGBouncer) back to the Main\n   primary database\n3. 
Increment the Postgres sequences for all CI tables to avoid overlapping with\n   data we created in our testing\n\nUltimately having a simple rollback mechanism proved very useful in doing many\npractice runs on staging.\n\n## Rehearsing the migration process\n\nBefore executing the final migration on GitLab.com, we executed seven rehearsals\nwith rollback and one final migration on our staging environment. In these\npractice runs, we discovered many small issues that would have likely caused\nissues in the production environment.\n\nThese rehearsals also gave all the participants an opportunity to perfect their steps\nin the process to minimize delays in our production rollout. This practice\nultimately allowed us to be quite confident in our timeline of at most two hours of downtime.\n\nIn the end, we finished the migration in 93 minutes, with a few small delays caused by\nsurprises we did not see in staging.\n\nThe rehearsal process was very time-consuming and a vast effort to execute in\nthe context of GitLab, where we all [work\nasynchronously](https://about.gitlab.com/company/culture/all-remote/asynchronous/)\nand across different timezones. However, it proved to be essential to the success of\nthis project.\n\n## Preparing for production migration\n\nOne week before our the final migration on production we prepared a production\nreadiness review issue for final approval from executives. This was a good\nopportunity to highlight all the preparation and validation we'd done to give\nus confidence in the plan. This also encouraged us to do extra validation where\nwe might expect to see questions or concerns about the plan.\n\nSome highlights from this review included:\n\n1. The amount of practice runs we'd done including details about the problems\n   we'd seen and resolved in staging\n2. Metrics which we'd observed to prove all the queries were using the right\n   database connections already\n3. 
Details about how long we'd been running without issues in local development\n   with all GitLab developers running with two databases by default\n4. Details about the rollback strategy we would use if necessary and how we\n   tested this rollback strategy in staging as well as some production\n   validation\n\n## Tracking the results\n\nAfter we completed the rollout we tracked\n[performance improvements across some metrics we expected to improve][performance_improvements_tracking_issue].\n\nThe data showed:\n\n- We decreased the CPU utilization of our primary database server, giving us much more headroom.\n\n  ![CPU peaks before and after decomposition shows smaller peaks after](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/cpu-post-decomposition-improvement.png)\n\n- We can free around 9.2TiB out of 22TiB from our Main database by truncating the CI tables.\n- We can free around 12.5TiB out of 22TiB from our CI database by truncating the Main tables.\n- We significantly reduced the rate of dead tuples on our Main database.\n- We significantly reduced vacuuming saturation. Before decomposition the Main database\n  maximum vacuuming saturation was up to 100%, with the average closer to 80%. After\n  decomposition, vacuuming saturation has stabilized at around 15% for\n  both databases.\n\n  ![Vacuum saturation before and after decomposition shows a decrease after decomposition](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/vacuum-saturation-post-decomposition.png)\n\n- We reduced the average query duration for our Sidekiq PGBouncer query\n  pool by at least a factor of 5 once we scaled up connection limits due to our\n  increased headroom. 
Previously we needed to throttle connections for\n  asynchronous workloads to avoid overloading the primary database.\n\n  ![Average active query duration by workload shows a decrease after scaling connections after decomposition](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/pgbouncer-active-query-duration-by-workload.png)\n\n## Continue reading\n\nYou can read more about some interesting technical challenges and surprises we\nhad to deal with along the way in\n[part 3](/blog/path-to-decomposing-gitlab-database-part3/).\n\n[initial_migration_plan_mr]: https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/84588\n[performance_improvements_tracking_issue]: https://gitlab.com/gl-retrospectives/sharding-group/-/issues/18\n[phase7_summary_epic]: https://gitlab.com/groups/gitlab-org/-/epics/7791\n[rollback_issue]: https://gitlab.com/gitlab-org/gitlab/-/issues/361759\n",[9,1698],{"slug":5335,"featured":6,"template":680},"path-to-decomposing-gitlab-database-part2","content:en-us:blog:path-to-decomposing-gitlab-database-part2.yml","Path To Decomposing Gitlab Database Part2","en-us/blog/path-to-decomposing-gitlab-database-part2.yml","en-us/blog/path-to-decomposing-gitlab-database-part2",{"_path":5341,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5342,"content":5347,"config":5351,"_id":5353,"_type":14,"title":5354,"_source":16,"_file":5355,"_stem":5356,"_extension":19},"/en-us/blog/path-to-decomposing-gitlab-database-part3",{"title":5343,"description":5344,"ogTitle":5343,"ogDescription":5344,"noIndex":6,"ogImage":2010,"ogUrl":5345,"ogSiteName":667,"ogType":668,"canonicalUrls":5345,"schema":5346},"Decomposing the GitLab backend database, Part 3: Challenges and surprises","This is the final installment in our three-part series about our yearlong project to decompose GitLab's Postgres database.","https://about.gitlab.com/blog/path-to-decomposing-gitlab-database-part3","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Decomposing the GitLab backend database, Part 3: Challenges and surprises\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dylan Griffith\"}],\n        \"datePublished\": \"2022-08-04\",\n      }",{"title":5343,"description":5344,"authors":5348,"heroImage":2010,"date":5314,"body":5349,"category":743,"tags":5350},[5313],"\n_This blog post is part 3 in a three-part series. It focuses on some interesting\nlow-level challenges we faced along the way, as well as some surprises we found during\nthe migration._\n\n- To read about the design and planning phase, check out [part 1](/blog/path-to-decomposing-gitlab-database-part1/).\n- To read about how we executed the actual migration and our results, check out [part 2](/blog/path-to-decomposing-gitlab-database-part2/).\n\n## The challenge with taking GitLab.com offline\n\nOne key part of our migration process was to take all systems offline that\ncould potentially talk to the database. This may seem as simple as \"shutting\ndown the servers\" but given the scale and complexity of GitLab.com's\ninfrastructure this proved to be really quite complex. Here is just a subset of\nthe different things we had to shut down:\n\n1. Kubernetes pods corresponding to web, API, and Sidekiq services\n2. Cron jobs across various VMs\n\n## Surprises along the way\n\nEven though we had rehearsed the migration many times in staging, there were\nstill some things that caught us off-guard in production. Luckily, we had\nallocated sufficient buffer time during the migration to resolve all of these\nduring the call:\n\n1. Autovacuum on our largest CI tables take a long time and can run at any\n   time. 
This delayed our migration as we needed to gain table locks for our\n   [write block\n   triggers](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/83211).\n   Adding these triggers requires a `ShareRowExclusiveLock` which cannot be\n   acquired while the autovacuum is running for that table. We disabled\n   some manual vacuum processes we were aware of ahead of the call but\n   autovacuum can happen at any time and our `ci_builds` table just happen to\n   have autovacuum at the time we were trying to block writes to this table. To\n   work around this we needed to temporarily disable autovacuum for the\n   relevant tables and then find the `pid` for the autovacuum process and\n   terminate this which allowed our triggers to be successfully added.\n2. Sometimes a long-running SSH session by an SRE or developer can leave open a\n   surprising database connection that needs to be tracked down and closed.\n3. Cron jobs can be run on various hosts that start rails processes or database\n   connections at any time. We had many examples that were created with\n   different purposes for database maintenance over the years, and we missed at\n   least one in our practice runs. They weren't as easy to detect on staging as\n   they may not all be configured on staging, or they run a lot faster on\n   staging. Also, our staging runs all happened on week days, but our\n   production migration happened on a weekend where it seemed we were\n   deliberately running some database maintenance workloads during low\n   utilization hours.\n4. Our Sentry client-side error tracking caused us to overload our Sentry\n   server due to many of users leaving open GitLab browser tabs. As\n   the browser tabs periodically make asynchronous requests to GitLab and get\n   errors (since GitLab.com was down), they then send all these errors to Sentry\n   and this overloaded our Sentry error server to the point we couldn't load it\n   to check for errors. 
This was quickly diagnosed based on the URL all the\n   requests were sent to, but it did delay our migration as checking for new\n   errors was key to determining success or failure of the migration.\n\n## Cascading replication doubles latency (triples in our case)\n\nA key initial step in our phased rollout was to move all read-only CI traffic\nto dedicated CI replicas. These were cascading replicas from the main Patroni\ncluster. Furthermore, we made the decision to create the standby cluster leader\nas a replica of another replica in the Main Patroni cluster. Ultimately this\nmeant the replication process for our CI replicas was\n`Main Primary -> Main Replica -> CI Standby Leader -> CI Replica`.\n\nThis change meant that our CI replicas had roughly three times as much latency\ncompared with our Main replicas, which previously served CI read-only traffic.\nSince our read-only load balancing logic is based on users sticking to the primary\nuntil a replica catches up with the last write that they performed, users\nmight end up sticking to the primary longer than they previously would have.\nThis may have served to increase our load on the primary database after rolling\nout Phase 3.\n\nWe never measured this impact, but in hindsight it is something we\nshould have factored in and benchmarked with our gradual rollout of Phase 3.\nAdditionally, we should have considered mitigating this issue by having the `CI\nStandby Leader` replicating straight from the `Main Primary` or adding the `CI\nStandby Leader` to the pool of replicas that we could service CI read-only\ntraffic.\n\n## Re-balancing PGBouncer connections incrementally without saturating anything\n\n[Phase 4 of our rollout][phase4_change_request] turned out to be one of the\ntrickiest parts of the migration. 
Since we wanted all phases (where possible)\nto be rolled out incrementally we needed some way to [solve for\nincrementally re-balancing connection pool limits][phase4_gradual_rollout_issue]\nfrom `GitLab -> PGBouncer -> Postgres` without exceeding the total connection\nlimit of Postgres or opening too many connections to Postgres that might\nsaturate CPU. This was difficult because all the connection limits were very\nwell tuned, and we were close to saturation across all these limits.\n\nThe gradual rollout of traffic for Phase 4 looked like:\n\n```mermaid\ngraph LR;\n    PostgresMain[(PostgresMain - Limit K max_connections)]\n    GitLabRails-->|100-X % of CI queries|PGBouncerMain\n    GitLabRails-->|X% of CI queries|PGBouncerCi\n    PGBouncerMain-->|Limit N pool_size|PostgresMain\n    PGBouncerCi-->|Limit M pool_size|PostgresMain\n```\n\nWe wanted to gradually increase X from 0-100. But this presented a problem, because\nthe number of connections to the `PostgresMain` DB will change\nwith this number.\n\nWe assume it has some initial limit `K` connections, and we\nassume this limit is deliberately just high enough to handle the current\nconnections from `PGBouncerMain` and not overload the CPU. We need to carefully\ntune `N` and `M` `pool_size` values across the separate PGBouncer processes to\navoid overloading the limit K, and we also need to avoid saturating the\nPostgres server CPU with too much traffic. At the same time, we need to ensure\nthere are enough connections to handle the traffic to both PGBouncer pools.\n\nWe addressed this issue by taking very small steps during low\nutilization hours (where CPU and connection pools weren't near saturation) and\ndoing very detailed analysis after each step. We would wait a day or so to figure out how\nmany connections to move over with the following steps, based on the number of\nconnections that were used by the smaller step. 
We also used what data we had\nearly on from table-based metrics to get an insight into how many connections\nwe thought we'd need to move to the CI PGBouncer pool.\n\nIn the end, we did need to make small adjustments to our estimates along the way\nas we saw saturation occur, but there was never any major user-facing saturation\nincidents, as the steps were small enough.\n\n## Final thoughts\n\nWe're very happy with the results of this project overall.\n\nA key objective of this project, which was hard to predict, was how the complexity of\nan additional database might impact developer productivity. They can't do\ncertain types of joins and there is more information to be aware of.\nHowever, many months have now passed, and it seems clear now that the complexity is mostly abstracted by Rails models. With continued large number of developers contributing, we have seen\nlittle-to-no impact on productivity.\n\nCombining this success with the huge scalability headroom we've gained, we believe this was a great decision for GitLab.\n\n## More reading\n\nThis blog series contains many links to see our early designing, planning, and\nimplementation of various parts of this project. GitLab's\n[transparency value](https://handbook.gitlab.com/handbook/values/#transparency)\nmeans you can read all the details and get a sense of what it's like to work on\nprojects like this at GitLab. 
If you'd like to know more or something was\nunclear please leave a comment, so we can make sure we share all our learnings.\n\n[phase4_change_request]: https://gitlab.com/gitlab-com/gl-infra/production/-/issues/6440\n[phase4_gradual_rollout_issue]: https://gitlab.com/gitlab-org/gitlab/-/issues/347203\n",[9,1698],{"slug":5352,"featured":6,"template":680},"path-to-decomposing-gitlab-database-part3","content:en-us:blog:path-to-decomposing-gitlab-database-part3.yml","Path To Decomposing Gitlab Database Part3","en-us/blog/path-to-decomposing-gitlab-database-part3.yml","en-us/blog/path-to-decomposing-gitlab-database-part3",{"_path":5358,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5359,"content":5365,"config":5370,"_id":5372,"_type":14,"title":5373,"_source":16,"_file":5374,"_stem":5375,"_extension":19},"/en-us/blog/people-ops-using-gitlab",{"title":5360,"description":5361,"ogTitle":5360,"ogDescription":5361,"noIndex":6,"ogImage":5362,"ogUrl":5363,"ogSiteName":667,"ogType":668,"canonicalUrls":5363,"schema":5364},"GitLab People Ops: Getting drunk on our own wine","How our People Ops team uses GitLab day to day: from onboarding new GitLab team-members to keeping our handbook up to date.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678697/Blog/Hero%20Images/how-people-ops-uses-gitlab.jpg","https://about.gitlab.com/blog/people-ops-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab People Ops: Getting drunk on our own wine\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chloe Whitestone\"}],\n        \"datePublished\": \"2018-05-25\",\n      }",{"title":5360,"description":5361,"authors":5366,"heroImage":5362,"date":5367,"body":5368,"category":808,"tags":5369},[1614],"2018-05-25","\nWe’ve heard people say \"[every company is a software 
company](https://www.forbes.com/sites/techonomy/2011/11/30/now-every-company-is-a-software-company/#5761b57cf3b1),\" but what about the people who work there? At GitLab, we [drink our own wine](/company/culture/), and that means all of our team members, in some way or another, are technical because we use GitLab ourselves. In [People Ops and recruiting](/handbook/people-group/), I use GitLab every day; just take a look at my [activity chart](https://gitlab.com/chloe)!\n\n![Chloe's GitLab Activity Chart](https://about.gitlab.com/images/blogimages/gitlab-chloe.png){: .shadow.medium.center}\n\nThese blue squares represent contributions I’ve made across the GitLab project (and the white ones prove that work/life balance exists!).\n\n## Getting started with issues\n\nA good portion of those blue squares are dedicated towards issues, specifically pre-established template issues, such as [the onboarding issue](https://gitlab.com/gitlab-com/people-group/employment-templates/-/blob/main/.gitlab/issue_templates/onboarding.md). This is the \"first look\" our new hires have into GitLab and our workflow, and it’s a fantastic way to get them using issues, and thus GitLab the product, right away. One of the tasks in this issue is \"add yourself to the [team page](/company/team/),\" so within the first week at GitLab, all team members submit a merge request, even if they’ve never coded before. 
Another task is to \"make an improvement to the handbook,\" which both encourages new hires to submit another merge request and to explore our handbook and adopt our ethos of \"everyone can contribute.\"\n\n>within the first week at GitLab, all team members submit a merge request, even if they’ve never coded before\n\nOther issue templates we have and use regularly are [offboarding](https://gitlab.com/gitlab-com/people-group/employment-templates/-/blob/main/.gitlab/issue_templates/offboarding.md) and [opening new vacancies](https://gitlab.com/gitlab-com/people-ops/vacancy/blob/master/.gitlab/issue_templates/vacancy.md). People Ops uses these issue templates to maintain version control, enable everyone to contribute, and allow us to continually iterate and improve on how we onboard our new hires, all of which promote the GitLab [values](https://handbook.gitlab.com/handbook/values/).\n\nWe constantly iterate on all of our issue templates, predominantly the onboarding issue template mentioned above. You can view its [history](https://gitlab.com/gitlab-com/people-ops/employment/commits/master/.gitlab/issue_templates/onboarding.md) and see how everyone at the company iterates on our onboarding issue – not just People Ops, but also new hires and seasoned GitLab team-members. You can also view some of the ideas we’re working through in the [\"Overhaul onboarding for Ta-NEW-kis\" issue](https://gitlab.com/gitlab-com/people-ops/General/issues/105), and feel free to contribute your own ideas!\n\n## Transparent by default\n\nPeople Ops and HR departments are not typically considered transparent at most companies, but here at GitLab we try our best to be as transparent as possible. The only times we keep things confidential are when we are legally required to, or to protect someone’s privacy. Everything else is fair game! 
Some great examples in our handbook are our [identity data](/company/culture/inclusion/identity-data/), [internal feedback](/company/culture/internal-feedback/), and the questions we ask in our [screening calls with candidates](/handbook/hiring/interviewing/#screening-call). We make it a point to keep this data, as well as other handbook pages dedicated to People Ops and recruiting, up to date and accurate.\n\n## Everyone can contribute\n\nWe encourage our team members and the wider GitLab community to contribute and give us their ideas because they will have a fresh look and unique perspective, which can only improve our own understanding.\n\nI remember when I joined GitLab a year ago, I interviewed with [Sid Sijbrandij](/company/team/#sytses), our CEO, and he asked me what I wanted to accomplish within my first month at GitLab. I told him I wanted to become proficient in Git so that I could properly contribute, and he was surprised! But I was steadfast, and within my first two weeks, I’d already started contributing via my local machine. Sure, I’m not a developer by any means, but I use Git every day, have figured out quite a few things both on my own, and with the help of our #git-help Slack channel, was even granted merge powers last year! 
Here at GitLab, everyone can contribute, no matter what your background is.\n\nPhoto by [Maxime Le Conte des Floris](https://unsplash.com/) on Unsplash\n{: .note}\n",[9,811,723],{"slug":5371,"featured":6,"template":680},"people-ops-using-gitlab","content:en-us:blog:people-ops-using-gitlab.yml","People Ops Using Gitlab","en-us/blog/people-ops-using-gitlab.yml","en-us/blog/people-ops-using-gitlab",{"_path":5377,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5378,"content":5384,"config":5390,"_id":5392,"_type":14,"title":5393,"_source":16,"_file":5394,"_stem":5395,"_extension":19},"/en-us/blog/pick-your-brain-interview-brandon-foo",{"title":5379,"description":5380,"ogTitle":5379,"ogDescription":5380,"noIndex":6,"ogImage":5381,"ogUrl":5382,"ogSiteName":667,"ogType":668,"canonicalUrls":5382,"schema":5383},"Pick Your Brain interview with CEO Sid Sijbrandij","Brandon Foo, co-founder and CEO of Polymail (YC S16), recently sat down with GitLab CEO Sid Sijbrandij.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680453/Blog/Hero%20Images/pick-your-brain-interview.jpg","https://about.gitlab.com/blog/pick-your-brain-interview-brandon-foo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pick Your Brain interview with CEO Sid Sijbrandij\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brandon Foo\"}],\n        \"datePublished\": \"2017-06-02\",\n      }",{"title":5379,"description":5380,"authors":5385,"heroImage":5381,"date":5387,"body":5388,"category":787,"tags":5389},[5386],"Brandon Foo","2017-06-02","\n\nI sat down for a “[pick your brain](/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)” meeting with GitLab’s CEO and Co-founder, [Sid Sijbrandij](/company/team/#sytses), to learn about his approach towards different aspects of building a successful startup. 
Here are some highlights of the conversation.\n\n\u003C!-- more -->\n\n**Brandon: When you were an earlier company around your seed stage, what were your most effective growth strategies?**\n\n**Sid:** GitLab got started as a [Show HN of GitLab.com](https://news.ycombinator.com/item?id=4428278). We’ve always tried to see where our users were and talk with them there.\n\nWhen you find people who have a need for your product, you start by trying to bring it to their attention. Then you enter a phase where they care about your product, and they start asking you for more — that’s easy, that’s the honeymoon phase. Now we’re getting to the phase where people think of GitLab as a given, and that it should be perfect, so they tell you the things that could be better.\n\n**Brandon: How do you think about product strategy with respect to building new features versus improving or increasing adoption of existing features?**\n\n**Sid:** It’s kind of a pendulum that swings back and forth. We focused a lot on new features for a while to accomplish our [idea to production vision](https://www.youtube.com/watch?v=PoBaY_rqeKA), and now this quarter [we’re focusing](/direction/) on increasing adoption of existing features. Mostly this is necessary for newer features, but that’s not the same as increasing the features’ scope, it’s more a question of how we can increase adoption for the features we already have, and seeing which functions are missing. When we release features and have the suspicion few people are using them, we evaluate to make sure those features are things that people can really use. Most recently in [9.2](/releases/2017/05/22/gitlab-9-2-released/) we added the framework to translate GitLab into any language, and allowed users to specify multiple assignees to better track shared ownership of an issue. 
[In 2017](/direction/#2017-goals), we’ll continue to ship features tailored for enterprise development teams, and make it easier to build, deploy, and monitor applications within GitLab.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WBf_DA0FF9k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n**Brandon: How do you balance building visionary features that people aren’t necessarily asking for vs. building in direct response to customer requests?**\n\n**Sid:** We do both. We started off doing just version control and code review, and now GitLab delivers the entire DevOps pipeline, everything from chatting about an idea and planning it, to getting it out in production and monitoring. We envision enabling everyone to collaborate on digital content, so they can work together and achieve better results. No one asked for that — it’s something we did, it’s the future of the company now. We’d have been in a bad spot if we hadn’t done that.\n\nAt the same time, don’t lose track of what your customers are asking for. Balancing that is the hard part. The natural result is too little visionary stuff; if you build the right company, then everyone will be listening to your customers and screaming, “Let’s build the things customers want!” So the leadership’s task is focusing on what we need to do in order to be a better company in five years.\n\n**Brandon: Since you bootstrapped for some time, how did you decide when it was the right time to raise institutional funding?**\n\n**Sid:** One big reason is the talent we wanted to attract. While we were in YC, we tried to hire a good sales leader, but everyone we approached wanted stock in the company. 
We hadn’t raised any outside money so stock was all mine and my co-founder [Dmitriy’s](/company/team/#dzaporozhets) — he started GitLab and I started GitLab.com.\n\nThis made clear that if we were unable to give out stock, we were not going to hire the best people; if we’re not getting the best people, we’re going to lose in the marketplace. If you give people stock while not taking outside money, you’ll still grow but very slowly, which is not the kind of deal these executives were expecting. They expect that after 6-7 years the stock is worth something and they can get liquid. The only way to get there is to attract external capital.\n\n**Brandon: Is there anything that you would change in retrospect that you think might improve the outcome of where GitLab is today?**\n\n**Sid:** In hindsight, I’d rather have started GitLab.com a bit later. We’ve grown so fast since then that we’ve been behind in making a great experience for our users.\n\nI would focus on people running GitLab self-managed, and start GitLab.com when we were ready for it. I’d rather have people not use our product than using the product and not being absolutely happy about it. 
It’s not about users, it’s about happy users.\n\nIf not 100% of the users are happy, we’re not doing a good enough job.\n\n## About the Guest Author\n\nBrandon Foo is the Co-founder and CEO of [Polymail](https://polymail.io/), an email productivity platform designed for modern teams and companies.\n",[2749,873,9],{"slug":5391,"featured":6,"template":680},"pick-your-brain-interview-brandon-foo","content:en-us:blog:pick-your-brain-interview-brandon-foo.yml","Pick Your Brain Interview Brandon Foo","en-us/blog/pick-your-brain-interview-brandon-foo.yml","en-us/blog/pick-your-brain-interview-brandon-foo",{"_path":5397,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5398,"content":5404,"config":5410,"_id":5412,"_type":14,"title":5413,"_source":16,"_file":5414,"_stem":5415,"_extension":19},"/en-us/blog/play-reviewer-roulette",{"title":5399,"description":5400,"ogTitle":5399,"ogDescription":5400,"noIndex":6,"ogImage":5401,"ogUrl":5402,"ogSiteName":667,"ogType":668,"canonicalUrls":5402,"schema":5403},"Let's play Reviewer Roulette! An easy way to find a reviewer for your merge request","Finding the right reviewer for a merge request can be tough. Reviewer Roulette makes the decision easier – by making it random!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672195/Blog/Hero%20Images/play-reviewer-roulette.jpg","https://about.gitlab.com/blog/play-reviewer-roulette","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Let's play Reviewer Roulette! An easy way to find a reviewer for your merge request\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dennis Tang\"}],\n        \"datePublished\": \"2018-06-28\",\n      }",{"title":5399,"description":5400,"authors":5405,"heroImage":5401,"date":5407,"body":5408,"category":743,"tags":5409},[5406],"Dennis Tang","2018-06-28","\n\nGitLab is [growing quickly], and [constantly looking for more talented people] to join the team. 
While exciting, it can be tough to keep track of who's who, especially when you're new to the company.\n\nSo how do you know who to contact if you need a pair of eyes on your merge request?\n\n## Meet Reviewer Roulette!\n\nReviewer Roulette is a Slack slash command to help GitLab team-members randomly select a person from a given team, which can be especially useful as multiple teams work together to deliver features in a single merge request.\n\n![Demo of /reviewerroulette](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/demo.gif){: .shadow.center.medium}\n\n---\n\n## The idea\n\nIt's quite common to find that your issue or merge request will have multiple labels to associate different feature areas and teams that are contributing to them. As someone who's recently joined GitLab, I'm still getting to know [all the different teams and people] that work at GitLab. That said, I'm working on a feature with the [CI/CD](/topics/ci-cd/) or discussion team, who should I reach out to if I have questions or need a review of my work?\n\n![Various labels on Merge Requests in gitlab-ce](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/labels.png){: .shadow.center.medium}\n\nThe idea arose from the [frontend team weekly call] where [Tim Zallmann] reminded us that, \"Everyone on the frontend team is a reviewer.\" The team previously had a microservice built by [Luke Bennett] for this, however, it's no longer online. Beyond that, wouldn't it be convenient to simply type a command in Slack to be suggested someone to ping for a review?\n\nI can say with confidence that GitLab is a company that truly exemplifies its values, and I was empowered by the value of [collaboration] to build something that could help our team (and others!) find reviewers. I couldn't be the only one who had this problem!\n\n> **Do it yourself** Our collaboration value is about helping each other when we have questions, need critique, or need help. 
No need to brainstorm, wait for consensus, or do with two what you can do yourself.\n\nI quickly went to work to (hastily) put together a proof-of-concept to see if it would be something that people would want to use.\n\n## Decision fatigue, be gone!\n\n![Screenshot of /reviewerroulette](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/screenshot.png){: .shadow.right.small.wrap-text}\n\nIt was presented to the frontend team and received warmly, and many people were keen to contribute and also [suggest ideas] that would make it even more useful!\n\nAlthough it was originally intended for the frontend group, since I was building it from scratch, it was very easy to make the decision to have it work for all engineering teams.\n\nWith Reviewer Roulette, I don't have to ping entire Slack channels or guess from our team page to try to find _someone_ to talk to.\n\nAdditionally, it provides a number of other benefits such as:\n\n1.  It promotes a more balanced distribution of reviewers amongst the team.\n    * Less experienced reviewers have more opportunities to do code reviews\n    * More experienced reviewers are not as heavily relied on\n1.  It allows more team members to learn more about parts of the codebase they may not be as familiar with, increasing the knowledge of the team overall\n1.  It provides more opportunities to apply our [code review guidelines] or [frontend style guides] to all team members\n1.  
It reduces bias towards reviewers that you may unconsciously prefer to select\n\nOf course, we have our various subject matter experts such as our [frontend domain experts] and [gitlab-ce maintainers] who may provide the best insight for a given topic, but it's good to randomly select reviewers by default!\n\n## How it's made\n\nWhen it came to thinking about how to build Reviewer Roulette, it wasn't so much about the tech as it was about being enabled to create something that will benefit the team.\n\nEmbracing our value of [efficiency], the solution is very much a boring one. It's a simple Node.js application utilizing `js-yaml` and `express` to be able to search our [team structure file] and respond to Slack's slash command requests properly.\n\n## What's next\n\nReviewer Roulette is seeing regular usage, and has [plenty of features planned] to hopefully increase its usefulness.\n\nWhile originally intended for engineering, it can [help the entire company] out. In addition to our [Coffee Break calls], we also have [a step in our onboarding process] to meet five different people across different teams and countries. That's something that Reviewer Roulette could easily help with!\n\nWe also plan on moving it to the frontend [GKE] cluster, and activating [Auto DevOps] to make builds and deployments painless.\n\nIf you're interested in checking it out, feel free to take a look at the [project]! Perhaps it might be useful to you and your team?\n\n## Share your thoughts!\n\nIf there's interest in using Reviewer Roulette for your community contribution to GitLab projects, let us know in the comments and we can release it on Slack for everyone to use!\n\nWhat do you think of Reviewer Roulette? Is this something you would use for your team? 
How do you pick people for reviewing?\n\n[Photo](https://unsplash.com/photos/w6OniVDCfn0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Krissia Cruz on [Unsplash](https://unsplash.com/search/photos/roulette?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n[growing quickly]: /company/okrs/#ceo-great-team-active-recruiting-for-all-vacancies-number-of-diverse-per-vacancy-real-time-dashboard\n[constantly looking for more talented people]: /jobs/\n[all the different teams and people]: /company/team/\n[frontend domain experts]: /handbook/engineering/frontend/#frontend-domain-experts\n[gitlab-ce maintainers]: /handbook/engineering/projects/#gitlab-ce\n[frontend team weekly call]: /handbook/engineering/frontend/#frontend-group-calls\n[Tim Zallmann]: /company/team/#tpmtim\n[Luke Bennett]: /company/team/#__lukebennett\n[suggest ideas]: https://gitlab.com/dennis/reviewer-roulette/issues/\n[plenty of features planned]: https://gitlab.com/dennis/reviewer-roulette/issues/\n[efficiency]: https://handbook.gitlab.com/handbook/values/#efficiency\n[team structure file]: https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/team.yml\n[auto devops]: https://docs.gitlab.com/ee/topics/autodevops/\n[coffee break calls]: /company/culture/all-remote/tips/#coffee-chats\n[a step in our onboarding process]: https://gitlab.com/gitlab-com/people-group/employment-templates/-/blob/main/.gitlab/issue_templates/onboarding.md#day-4-morning-social\n[help the entire company]: https://gitlab.com/dennis/reviewer-roulette/issues/12\n[gke]: /partners/technology-partners/google-cloud-platform/\n[project]: https://gitlab.com/dennis/reviewer-roulette/\n[collaboration]: https://handbook.gitlab.com/handbook/values/#collaboration\n[code review guidelines]: https://docs.gitlab.com/ee/development/code_review.html\n[Frontend style guides]: 
https://docs.gitlab.com/ee/development/fe_guide/index.html#style-guides\n",[9,811,3138],{"slug":5411,"featured":6,"template":680},"play-reviewer-roulette","content:en-us:blog:play-reviewer-roulette.yml","Play Reviewer Roulette","en-us/blog/play-reviewer-roulette.yml","en-us/blog/play-reviewer-roulette",{"_path":5417,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5418,"content":5423,"config":5430,"_id":5432,"_type":14,"title":5433,"_source":16,"_file":5434,"_stem":5435,"_extension":19},"/en-us/blog/polishing-gitlabs-ui-a-new-color-system",{"title":5419,"description":5420,"ogTitle":5419,"ogDescription":5420,"noIndex":6,"ogImage":1452,"ogUrl":5421,"ogSiteName":667,"ogType":668,"canonicalUrls":5421,"schema":5422},"Polishing GitLab’s UI: A new color system","Senior UX Designer Pedro Moreira da Silva takes us on a deep dive into how the UX team improved the GitLab UI’s color palette.","https://about.gitlab.com/blog/polishing-gitlabs-ui-a-new-color-system","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Polishing GitLab’s UI: A new color system\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pedro Moreira da Silva\"}],\n        \"datePublished\": \"2018-03-29\",\n      }",{"title":5419,"description":5420,"authors":5424,"heroImage":1452,"date":5426,"body":5427,"category":743,"tags":5428},[5425],"Pedro Moreira da Silva","2018-03-29","\nWe receive a lot of feedback from our users and the broader community. After\nhearing that there is a perceived lack of consistency and quality in GitLab’s\nUI, we decided to take a look at our _color palette_.\n\n\u003C!-- more -->\n\nAesthetic aspects like this are a fundamental part of the UI. 
If we don’t get\nthese right, everything else in the UI won’t feel, look, or behave correctly.\nLike a house, these aesthetics are the foundation upon which everything else is\nbuilt.\n\nOur color palette had various issues, so we started by:\n\n- [building a better palette][ce#28614] that aligned with our goals,\n- and [defining a color priority system][ce#31094] that helped us move forward.\n\n## Why start with colors?\n\nThere are many aesthetic aspects to a UI. So why tackle colors first? Well…\n\n- **Colors are easy to change**: it’s just a matter of changing simple values in\n  our [`variables.scss`](https://gitlab.com/gitlab-org/gitlab-ce/blob/1553a34dbff167978f5dc81cc3a21e0b3b2b2bfa/app/assets/stylesheets/framework/variables.scss#L14)\n  file.\n- **Color changes don’t affect layout**: we weren’t reinventing the wheel, so\n  these changes wouldn’t influence the layout and spacing between elements like\n  typography can.\n\nAnd, more subjectively, colors have a huge impact on the perception of a UI.\nIt’s said that 90 percent of information entering the brain is visual and color\nis an attention-grabbing device.\n\n## Issues with the previous color palette\n\n![Previous color palette](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/prev-palette.png)\n\n### It didn’t extend the brand colors\n\nThey weren’t in line with our [brand colors](https://gitlab.com/gitlab-com/gitlab-artwork/blob/9b07772f44a9fa51f395a95928a6e41c61a5b1cb/colors),\nwith the most obvious example being the pinkish-red normally associated with\nnegative aspects like errors or irreversible actions. We already have a red from\nour brand, so why use a different one?\n\n### There were too many similar colors\n\nWith so many colors, it wasn’t easy to tell them apart. 
They were so similar\nthat they no longer brought value to the table, just more guesswork and\nmaintenance.\n\n### There wasn’t enough contrast\n\nMany of our color combinations did not meet the contrast ratios defined in the\n[Web Content Accessibility Guidelines (WCAG)][wcag-contrast].\n\nNote that some of these issues were also applicable to grayscale colors (also\ncalled “achromatic”).\n\n## Building a better palette\n\nAt GitLab, we’ve done a lot of things while standing on the shoulders of giants,\naligning with our company value of [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions).\nAs such, one of our initial thoughts was to use an existing color palette,\nsomething that could save us time and maybe serve as the basis for our work.\n\nWe soon found [Open color](https://yeun.github.io/open-color/), an open source\ncolor scheme optimized for UI. It has 13 hues, each with 10 levels of\nbrightness, totaling 130 different colors. All of the values are there, it would\nbe easy for our Frontend team to get started by importing it as a dependency.\nThis was starting to look very promising and we were getting excited about this\nquick start.\n\nHowever, the more we thought about our current needs and goals, the more we\nrealized that this approach wasn’t going to work for us. Existing color palettes\nusually had too many colors for our needs and the ones we did need, would have\nto be tweaked to align with our brand colors. All of the upsides of using an\nexisting color palette were now irrelevant.\n\nWe went back to the drawing board, starting with defining the goals we wanted\nour new color palette to achieve:\n\n- Align with and extend our brand colors\n- Have only the hues that we need, the colors that have meaning in the UI\n- Be accessible by passing the WCAG\n\n### 1. 
Extending the brand\n\nThe first step in creating our new color palette was inspired by “[Add Colors To Your Palette With Color Mixing][viget-article],”\nwhere we used [ColorSchemer Studio](http://www.colorschemer.com/osx_info.php)\nto generate this color wheel from the [three brand colors](https://gitlab.com/gitlab-com/gitlab-artwork/blob/9b07772f44a9fa51f395a95928a6e41c61a5b1cb/colors)\nand the [primary purple used on this site](https://gitlab.com/gitlab-com/www-gitlab-com/blob/9c4a9b653f013483d5053c1da30cba6d4bb96bd5/source/stylesheets/_variables.scss#L16):\n\n{: .text-center}\n![Color wheel generated from the brand colors](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/color-wheel.png){:style=\"width:350px\"}\n\nInitial colors were separated by even intervals of hue and manually tweaked. In\nthe image above, the matching brand colors are next to the wheel for reference.\n\n### 2. Cutting the rainbow\n\nThen, we generated tints and shades for some of the hues in that color wheel:\ngreen, blue, purple, red and orange.\n\n{: .text-center}\n![Tints and shades](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/tints-shades.png){:style=\"width:451px\"}\n\nThese were first obtained from the [Material Design Palette Generator](http://mcg.mbitson.com/)\nand then tweaked manually using [Colorizer](http://colorizer.org/) and Eric\nMeyer’s [Color Blender](https://meyerweb.com/eric/tools/color-blend). The dark\norange colors are a good example of manual tweaking as they initially looked\nvery “muddy.”\n\nIt’s important to consider the number of tints and shades that you need, as that\naffects the flexibility when applying those colors. Our guiding principle here\nwas to provide clear and visible contrast between each step of the scale. 
If we\nhad steps that were too similar, the difference wouldn’t be noticeable, which\nmeant that there was no value in having those colors.\n\nWe didn’t want all of the colors of the rainbow, just the ones that _carry\nmeaning effectively_. We want to be able to communicate states and actions by\napplying colors to elements in the UI (e.g. informational elements are\nassociated with blue). If you have too many similar colors in a UI, like green\nand lime, you’re expecting too much not only of your users but also of your\nteam. On the one hand, most of your users won’t notice the difference between\ncolors when placed in a complex UI, so they also won’t pick up the different\nmeanings. On the other hand, your team will have more work learning, working\nwith, and maintaining unnecessary colors.\n\nAdditionally, we shouldn’t rely on color alone to communicate something, so\nthat’s also another point for not having too many similar colors. This is\nactually one of the success criteria of the WCAG about the [use of color](https://www.w3.org/TR/UNDERSTANDING-WCAG20/visual-audio-contrast-without-color.html):\n\n> Color is not used as the only visual means of conveying information,\n> indicating an action, prompting a response, or distinguishing a visual\n> element.\n\n### 3. Colors for everyone\n\nUsing a small set of colors which allows for better memorization and recognition\nis already a good step towards a more usable product, but it’s not enough.\n\n[Evaluating, testing, and prioritizing accessibility problems](https://gitlab.com/groups/gitlab-org/-/epics/31)\nis one of our main initiatives here at GitLab. Establishing contrast between\ntext and background is one of the key aspects of accessibility and, as we saw\nbefore, our previous color palette didn’t meet the [WCAG contrast\nratios][wcag-contrast]. 
So, as we were defining our new color palette, we\ncontinually tested the colors using the [WebAIM Color Contrast Checker](https://webaim.org/resources/contrastchecker/).\n\nAlong the way, we hit a problem: combinations of _white_ text over _green_ or\n_orange_ backgrounds did not pass **WCAG level AA for small text**. This was an\nissue because we wanted to keep a uniform “vibrancy” and “pop” throughout all\ncolors. While the colors looked uniform to our human eye, the WCAG test didn’t\n“see” them as we did. Would we be forced to “break” this visual consistency and\nuse darker shades for those colors? Not only that, but this would render them too\ndark to _carry meaning effectively_. In the following example, the “success”\nmeaning of green or the “warning” meaning of orange become less immediate as\ntheir contrast increases.\n\n![Warning and success elements can be more or less noticeable but that affects the result of the WCAG contrast tests](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/problematic-colors.png)\n\nWe found an interesting take on this at the [Google Design](https://design.google/)\nwebsite, which intentionally uses colors that at least pass **AA for large\ntext**:\n\n> Due to this site’s purpose being a source for visual design reference\n> and inspiration, we felt it was acceptable not to target a stronger color\n> contrast level. — [Behind the Code — Google Slash Design Accessibility](http://www.instrument.com/articles/google-slash-design-accessibility)\n\nConsidering our audience and user base, should we be rigid and enforce **AA\nlevel for small text**? As a first step towards better color contrasts, we\ndecided to set our minimum at **AA for large text**, even for _small text_. 
For\ngrays, we [tested and tweaked their contrast against light gray backgrounds][ce#36675],\nas that is a common color used to differentiate regions in the UI.\n\n{: .text-center}\n![All tints and shades with corresponding WCAG levels, including grays](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/tints-shades-wcag.png){:style=\"width:567px\"}\n\n## Color priorities\n\nSo, after all this work, we introduced a wide range of color tints and shades\nwith the new color palette. The problem was that there was no guidance for using\nthem. Some color decisions are fairly quick and intuitive, but we wanted to\nstandardize and make the color selection process as objective as possible for\neveryone, even developers. We want to give people the chance to make a decision\nwithout imposing approval or reviews by the UX team. We want to be [lean, efficient, and focus on results](https://handbook.gitlab.com/handbook/values/).\n\nSome questions that we should be able to answer:\n\n- “I need to use one blue, which shade should I pick?”\n- “This UI component needs three contrasting shades of green. Can I pick\n  whichever I want?”\n\nThe [Material Design colors](https://material.io/guidelines/style/color.html)\nhave been a great source of inspiration for us. They follow the numeric naming\nconventions used by the [CSS `font-weight` property](https://www.w3.org/TR/css-fonts-3/#font-weight-prop),\nwhere a higher value equals a higher degree of blackness. So, we’ve named our\ncolors from the lightest (**50**) to the darkest (**950**).\n\nOn top of this naming scheme, we’ve defined a system of color priorities. This\nis similar to how different font weights are used to create contrasting\ntypography that communicates hierarchy.\n\nWe can apply this same logic to colors, as seen in the image below, by tagging\nthem according to their priority: from **1** to **4**. If you need guidance, the\npriorities can help you make better choices. 
When choosing how to apply color to\na UI component:\n\n- You start at priority **1**, which is the medium weight **500**. There’s only\n  one shade with priority 1 per color (the “default” shade).\n- For more shades of the same color, you could then choose from the next\n  priority level, number **2**, which can either be **300** (lighter) or **700**\n  (darker). And so forth for even lighter or darker shades.\n\n![All tints and shades with corresponding priorities, names, and WCAG levels, including grays](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/color-priorities-system.png)\n\n## What’s next\n\nAlong the way, we’ve learned that [mixing colors and defining color palettes](https://books.google.com/books?id=R4qwDQAAQBAJ)\nis neither only science nor only art; it’s a subjective balance in the human mind.\nColor harmony depends on many factors, like culture, age, social status, or even\nthe [designer’s intent](http://www.aic-color.org/journal/v1/jaic_v1_review.pdf).\n\nWe’ll have to see how people use the 11 tints and shades and how they’re applied\nin our [Design System][ds]. This is a constant evolution, and we’re always\niterating (as we should be).\n\nNext, we’re going to review our [color meaning guidelines](https://design.gitlab.com/)\nand be more active in their usage, not only in the product but also in our\n[Design System][ds] and [pattern library](https://gitlab.com/gitlab-org/gitlab-design/blob/master/gitlab-elements.sketch).\n\nA new color palette and a color priority system are seemingly small steps\ntowards a better user experience throughout GitLab, but they do make a big\ndifference, for our users, our team, and every contributor. 
This is the first\ninitiative to polish our UI styles, next we’re implementing our new [type scale](https://gitlab.com/gitlab-org/gitlab-ce/issues/24310)\n– which will deserve a dedicated blog post.\n\nIf you have any questions, feel free to [post a comment on the community forum](https://forum.gitlab.com/new-topic?tags=blog-feedback),\n[tweet at us](https://twitter.com/gitlab), or join the discussion on the\nfollowing issues:\n\n- [Change chromatic/full colors to a more harmonious palette][ce#28614]\n- [Define color priorities][ce#31094]\n- [Define a pure gray color scale][ce#36675]\n",[9,1698,700,5429],"UI",{"slug":5431,"featured":6,"template":680},"polishing-gitlabs-ui-a-new-color-system","content:en-us:blog:polishing-gitlabs-ui-a-new-color-system.yml","Polishing Gitlabs Ui A New Color System","en-us/blog/polishing-gitlabs-ui-a-new-color-system.yml","en-us/blog/polishing-gitlabs-ui-a-new-color-system",{"_path":5437,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5438,"content":5444,"config":5449,"_id":5451,"_type":14,"title":5452,"_source":16,"_file":5453,"_stem":5454,"_extension":19},"/en-us/blog/power-of-iteration",{"title":5439,"description":5440,"ogTitle":5439,"ogDescription":5440,"noIndex":6,"ogImage":5441,"ogUrl":5442,"ogSiteName":667,"ogType":668,"canonicalUrls":5442,"schema":5443},"How iteration helps build our product and improve our work lives","One of GitLab’s core values, iteration permeates everything we do from UX design to product development. 
And when it comes to our work lives, iteration is a game changer.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681060/Blog/Hero%20Images/iteration.jpg","https://about.gitlab.com/blog/power-of-iteration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How iteration helps build our product and improve our work lives\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-02-04\",\n      }",{"title":5439,"description":5440,"authors":5445,"heroImage":5441,"date":5446,"body":5447,"category":808,"tags":5448},[869],"2020-02-04","\n\n*it-er-a-tion*\n\n_/ˌidəˈrāSH(ə)n/_\n\n_noun_\n\n_the repetition of a process or utterance._\n\n_repetition of a mathematical or computational procedure applied to the result of a previous application, typically as a means of obtaining successively closer approximations to the solution of a problem._ – Oxford Dictionary via Lexico\n\nAt GitLab iteration is simply what we do – with everything. CEO [Sid Sijbrandij](/company/team/#sytses) explains that even in the very early stages of GitLab, when the company was in the [Y Combinator](https://www.ycombinator.com) \"incubator,” he knew iteration was the right choice because even though it seems contradictory, you can go faster by breaking things down into smaller pieces. \"There were people, even at the time, who suggested that we should slow down. The response from GitLab has always been, 'No, we'll get the most we can get done. The smaller we split things up, the smaller the steps we take, the faster we can go.'\"\n\nIt’s not surprising that iteration is one of GitLab’s [six core values](https://handbook.gitlab.com/handbook/values/), and you don’t have to look too closely to see how it steers our product development. 
When we wanted to make our [error tracking feature](/blog/iteration-on-error-tracking/) stronger, we \"scoped” the project down and made small changes more quickly.\n\nOur user experience team took the same approach when [trying to improve usability](/blog/how-ux-research-impacts-product-decisions/), and [when we migrated](/blog/gitlab-journey-from-azure-to-gcp/) from Microsoft’s Azure to the Google Cloud Platform we used iteration to guide our process.\n\nBut perhaps where iteration shines brightest at GitLab is at the individual level where the ability to take small steps frees employees to take risks and be creative. This is something that’s obvious even if you’re a [brand new employee](/blog/agile-iteration-unique-onboarding-experience/).\n\nWe asked six team members to explain the impact of iteration on their work lives.\n\n[Heather Simpson](/company/team/#hsimpson), senior external communications analyst:\n\"Honestly, the ability to throw something out there without being judged because it’s not completely formed and polished is new and refreshing for me.  I know I’ve got teammates ready to collaborate and help me strengthen my ideas and the end result.”\n\n[Ashish Kuthiala](/company/team/#kuthiala), senior director of Product Marketing:\n\"It helps us create a culture and organization that learns very fast and creates a self-learning and always improving organization.  We cannot and do not always get things right but we learn and improve really rapidly.”\n\n[Emily Kyle](/company/team/#Emily), manager, Corporate Events and Branding:\n\"It allows me to be a bit bolder and braver in coming up with out of the box solutions and in my decision making. Small steps make change so much easier to achieve.”\n\n[Tina Sturgis](/company/team/#TinaS), manager, Partner and Channel Marketing:\n\"Iteration for me is a game changer at GitLab. Gone are the days of getting everyone's buy-in prior to rolling out messaging. 
Put it out there and people will iterate on it making it better. If my messaging was off, no worries – iterate on what it is NOT and keep driving to results.\"\n\n[Lorie Whitaker](/company/team/#loriewhitaker), senior UX researcher: \"To a UX researcher iteration means something different to me than other people. The value of iteration should encourage people to change directions when they find answers to their questions. Iteration should be a stop-gap measure to say ‘This is not the right solution. We will stop and reassess and rethink what is the right solution to this problem.’”\n\n[Lee Matos](/company/team/#lbot), Support engineering manager:\n\"Iteration is hard because at first it feels unnatural, but once you learn how to really iterate, it's liberating. You can keep being nimble which is huge.\"\n\nCover image by Eryk on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[811,9,810],{"slug":5450,"featured":6,"template":680},"power-of-iteration","content:en-us:blog:power-of-iteration.yml","Power Of Iteration","en-us/blog/power-of-iteration.yml","en-us/blog/power-of-iteration",{"_path":5456,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5457,"content":5463,"config":5467,"_id":5469,"_type":14,"title":5470,"_source":16,"_file":5471,"_stem":5472,"_extension":19},"/en-us/blog/preventing-burnout-a-managers-toolkit",{"title":5458,"description":5459,"ogTitle":5458,"ogDescription":5459,"noIndex":6,"ogImage":5460,"ogUrl":5461,"ogSiteName":667,"ogType":668,"canonicalUrls":5461,"schema":5462},"Preventing burnout: A manager's toolkit","GitLab CEO Sid Sijbrandij shares 12 steps that managers can take to help employees avoid burnout.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/preventing-burnout-a-managers-toolkit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Preventing burnout: A 
manager's toolkit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-05-03\",\n      }",{"title":5458,"description":5459,"authors":5464,"heroImage":5460,"date":3578,"body":5465,"category":808,"tags":5466},[950],"Working at a startup is demanding. GitLab team members are often under a lot of pressure. From mental health awareness to our posts on [identifying burnout](/blog/preventing-burnout/), GitLab wants to ensure our team members are working efficiently without feeling overwhelmed. Recently, GitLab co-founder and CEO Sid Sijbrandij and Michelle Hodges, vice president of Global Channels, discussed how managers can support their team members and help prevent burnout.\n\nSid and Michelle emphasized that the earlier a manager can identify burnout the better. Identifying burnout in a remote environment is more difficult than in a co-located workplace, but looking for early hallmarks such as exhaustion and reduced enthusiasm can help managers get ahead of the problem. \n\nSid shared the following 12 strategies managers can utilize to support their team and prevent burnout:  \n\n1. **Encourage time off.** Even taking a half day can help. Managers can take an active role in encouraging team members to take time off by telling their team members about their own upcoming vacations. Managers can ask team members when their next vacation is and, if they don’t have one, encourage them to plan one.\n\n1. **Lower the pressure.** When a manager senses that someone on their team may be getting close to burnout, they can lower the pressure of goals and [objectives and key results (OKRs)](/company/okrs/) and also ask about goals less frequently.\n\n1. **Be more positive.** Frankly, managers can be a significant source of stress, so try to be more positive about the team member and their reports. \n\n1. 
**Increase headcount.** Most of the time, there’s too much work for too few people, so managers can explore options to increase headcount. This can be temporary, such as borrowing time from someone on another team or hiring a consultant. \n\n1. **Offer team members coaching.** External coaching can help team members open up about their struggles, including working with their manager. \n\n1. **Remind employees of mental health care resources.** Point employees toward the company's mental health benefits and services. GitLab provides support to all team members through [ModernHealth](/handbook/total-rewards/benefits/modern-health/).\n\n1. **Express gratitude.** Send team members gifts to their home to show gratitude and an investment in your personal relationship. \n\n1. **Celebrate progress.** Burnout is often caused by a feeling of stagnation. Seeing the progress you’re making day-to-day is hard. Managers should create space to celebrate small wins and reflect on the mountains you’ve climbed. \n\n1. **Sympathize.** The work is tough. Have conversations about it. \n\n1. **Lead by example.** Managers should set and maintain working hours. For instance, Sid says he waits until the next working day to respond to Slack messages that happen after 6 p.m. \n\n    Help team members to be more effective by: \n    - Reviewing recurring meetings and [identifying what can be done async](/company/culture/all-remote/meetings/#2-cancel-unnecessary-meetings)\n    - Talking about what they're working on and helping them identify what work isn’t as important\n    - Identifying work that can be delegated to other team members, and empowering them to do so\n\n    Managers can also encourage team members to name things they won’t do. \n\n1. **Reduce the number of hours worked by agreeing to reduce effort.** Managers can ask team members to identify things that are likely to fail. 
Taking time to reflect on results can be very insightful and can allow team members to reduce their effort without compromising quality.\n\n1. **Share burnout concerns with others.** Using judgement or with permission, managers can give context and ask others to take it easy on specific team members when necessary.\n\nWatch the full conversation below.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9VO0H28QEz8\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n",[9,832,810],{"slug":5468,"featured":6,"template":680},"preventing-burnout-a-managers-toolkit","content:en-us:blog:preventing-burnout-a-managers-toolkit.yml","Preventing Burnout A Managers Toolkit","en-us/blog/preventing-burnout-a-managers-toolkit.yml","en-us/blog/preventing-burnout-a-managers-toolkit",{"_path":5474,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5475,"content":5481,"config":5486,"_id":5488,"_type":14,"title":5489,"_source":16,"_file":5490,"_stem":5491,"_extension":19},"/en-us/blog/preventing-burnout",{"title":5476,"description":5477,"ogTitle":5476,"ogDescription":5477,"noIndex":6,"ogImage":5478,"ogUrl":5479,"ogSiteName":667,"ogType":668,"canonicalUrls":5479,"schema":5480},"GitLab team members share how to recognize burnout (and how to prevent it)","Burning out is a common feeling at startups – here's what we're doing to address it at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680178/Blog/Hero%20Images/gitlabbers-share-how-to-recognize-burnout.jpg","https://about.gitlab.com/blog/preventing-burnout","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab team members share how to recognize burnout (and how to prevent it)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Clement Ho\"}],\n        \"datePublished\": \"2018-03-08\"\n      
}",{"title":5476,"description":5477,"authors":5482,"heroImage":5478,"date":5483,"body":5484,"category":808,"tags":5485},[4781],"2018-03-08","\n\nThe feeling of [burning out][mayo-clinic] is common for people working at startups. Oftentimes, if you are feeling burned out, you aren't the only one feeling that way. I chatted to some GitLab team members about how they knew they were burned out, and how they get back on track.\n\n\u003C!-- more -->\n\nIt's easy to burn out when you work remotely. It's easy to work straight through lunch, and feel like you must put in extra hours to help finish a big project. With monthly releases, many features feel extra important and necessary to put in extra time. This isn't ideal because pacing yourself actually works out cheaper in the long run, as burning out takes extra time for recovery.\n\nDuring the last [summit](/events/gitlab-contribute/), [Marin][marin] led a session about preventing burnout, thanks Marin! A lot of GitLab team members attended that session and many had similar feelings of either being burned out or feeling like they are on their way towards it. Some even mentioned that they were starting to experience those physical signs of feeling burned out (e.g. frequent headaches). After the summit, we as a team added more resources to the handbook and created some tools on how we as a team can recognize and prevent burnout.\n\n## How to recognize if you're burned out, according to GitLab team members\n\n### You're constantly tired\n\n>For me, the greatest signal of burnout is struggling to get out of bed in the morning. I tend to stick to pretty standard working hours so when I work late in the evening, multiple nights in a row, I start to struggle to get up in the morning or even lose track of what day it is. I recognize this as burnout because usually it isn't hard for me to get up and get my day started. In fact, I'm usually up long before I start work so I can make breakfast, walk my dog, do some creative writing. 
But when I'm burned out, I will wait until 8 or 8:30 to get up and go straight to the computer like a zombie. - Erica Lindberg, former manager, Global Content\n\n>I didn't realize I was burned out until I finally took a vacation. I experienced many symptoms but was not aware of it and since I was experiencing them for so long, I thought it was normal. I was extremely tired all the time and whenever I decided to take a break during the day, I would often fall asleep with my laptop on my lap. - Anonymous GitLab team member\n\n### You no longer enjoy things\n\n> I started losing my general feelings of enjoyment in life. Even the fun activities I had planned, weren't activities I looked forward to. - Anonymous GitLab team member\n\n### Your job performance suffers\n\n>I would put in extra hours to make up for my productivity but it still didn't seem to measure up with my past performance. - [Jacob Schatz, Frontend Lead](/company/team/#jakecodes)\n\n### Your relationships are strained\n\n>I would also have a hard time remembering information, so much so that my friends began noticing the difference in me. I found myself being agitated and angry towards the people around me but couldn't figure out the reason. - Anonymous GitLab team member\n\n## How to prevent burnout, according to GitLab team members\n\n### Set clear boundaries between work and home\n\n>I'm trying to limit how many days I allow myself to work over eight hours by either scheduling other activities in the evening with friends or my partner (it works better when you've committed to someone so they can help hold you accountable. These things can be anything from rock climbing to dinner or watching a movie) or simply blocking out my calendar and setting reminders for when it's time to shut off. 
And when it is time to shut off I've come up with a \"ritual\" of shutting down my computer, turning off my keyboard, monitor, and light in my office – this makes it harder to come back to \"just finish up one last thing\" - [Erica Lindberg, Content Marketing Manager](/company/team/#EricaLindberg_)\n\n>In order for me to prevent myself from burning out, I follow several rules. I make sure I only work seven hours a day and spend two additional hours learning. I dedicate at least seven hours of sleep every day, and I make sure I go to the gym and eat healthy regularly as part of my daily routine. - Anonymous GitLab team member\n\n### Take vacation\n\n>After my vacation, where I did absolutely nothing except enjoying nature, I came home feeling much more energized. I am now a happier person. I am less sleepy and agitated and have found myself much more productive than ever before. That week of vacation gave me years of my life back that I would have never gotten if I didn't truly disconnect from work. - [Jacob Schatz, Frontend Lead](/company/team/#jakecodes)\n\n### Know when to take a break\n\n>Last week, I was feeling really tired and emotional (upset and stressed) about certain things. When I noticed that, I cancelled my last meeting of the day last minute, even though it was with [Sid](/company/team/#sytses). I wouldn’t have been productive and able to deal with the stress. So I took off the rest of the day. I was 10x better equipped to handle things the next day. - Job van der Voort, former VP of Product\n\n### Switch off when you're away from work\n\n>I try to stop thinking about work over the weekends or in the evenings. I practice meditation, mindfulness, and deep breathing. - [Suri Patel, Content Marketing Associate](/company/team/#suripatel)\n\n### Don't suffer in silence\n\n>I experienced burnout at my previous company. 
If it were to happen again, I would speak to my manager and openly discuss my situation, telling him or her that the pace is not sustainable and that something needs to change. It might be a scary topic to discuss, but burnout doesn't just affect my professional life – it has an impact on my personal life, most importantly on my health, so having these transparent conversations is a necessity. I would speak to my manager as soon as I started feeling overwhelmed over a prolonged period of time. There will always be phases when we have to work more than usual, but if long hours become a norm, then it's something that needs to be addressed right away. - Anonymous GitLab team member\n\n### Other good habits to prevent burnout:\n\n- Don't go straight to work after you wake up. Try not to start working within 30 minutes of waking up\n- Remove Slack from your smartphone or at the very least, turn off notifications for it\n- Keep each other accountable. When you notice someone in a different time zone should be asleep, tell them\n- Use your Slack status to share a message with the team that you are unavailable\n- Schedule [random coffee breaks][random-coffee-breaks]\n\n## Changes we added to the handbook\n- [Encourage team members to communicate with their manager when they recognize burnout][handbook-burnout]\n- [Encourage team members to notice signs of burnout in their peers and direct reports][handbook-burnout]\n- [Added tips to avoid burnout][handbook-burnout]\n\nWhat are some strategies you have to prevent yourself from burning out? Please comment below. 
We'd love to continue being proactive against burning out.\n\n[Photo](https://unsplash.com/photos/MAGAXAYq_NE) by [Victoria Heath](https://unsplash.com/@vheath) on Unsplash\n{: .note}\n\n[mayo-clinic]: http://www.mayoclinic.org/healthy-lifestyle/adult-health/in-depth/burnout/art-20046642\n[random-coffee-breaks]: /handbook/communication/#random-room\n[handbook-burnout]: /handbook/paid-time-off/#recognizing-burnout\n[marin]: https://gitlab.com/marin\n[unsplash-photo]: https://unsplash.com/photos/_k31aFqnmTM\n[unsplash-credit]: https://unsplash.com/photos/_k31aFqnmTM?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\n[unsplash]: https://unsplash.com/@rikkichan89?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\n",[832,810,9],{"slug":5487,"featured":6,"template":680},"preventing-burnout","content:en-us:blog:preventing-burnout.yml","Preventing Burnout","en-us/blog/preventing-burnout.yml","en-us/blog/preventing-burnout",{"_path":5493,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5494,"content":5499,"config":5505,"_id":5507,"_type":14,"title":5508,"_source":16,"_file":5509,"_stem":5510,"_extension":19},"/en-us/blog/puma-nakayoshi-fork-and-compaction",{"title":5495,"description":5496,"ogTitle":5495,"ogDescription":5496,"noIndex":6,"ogImage":3844,"ogUrl":5497,"ogSiteName":667,"ogType":668,"canonicalUrls":5497,"schema":5498},"Ruby 2.7: Understand and debug problems with heap compaction","An overview of Ruby 2.7 heap compaction and the risks it adds to production Rails applications.","https://about.gitlab.com/blog/puma-nakayoshi-fork-and-compaction","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ruby 2.7: Understand and debug problems with heap compaction\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthias Käppler\"}],\n        \"datePublished\": \"2021-04-28\"\n      
}",{"title":5495,"description":5496,"authors":5500,"heroImage":3844,"date":5502,"body":5503,"category":743,"tags":5504},[5501],"Matthias Käppler","2021-04-28","\n\nThe GitLab Rails application runs on [Puma](https://puma.io/), a multi-threaded Rack application server written in the new Ruby.\nWe recently updated Puma to major version 5, which introduced [a number of important\nchanges](https://github.com/puma/puma/blob/master/History.md#500--2020-09-17),\nincluding support for _compaction_, a technique to reduce memory fragmentation in the\nRuby heap.\n\nIn this post we will describe what Puma's \"nakayoshi fork\" does, what compaction is,\nand some of the challenges we faced when first deploying it.\n\n## Nakayoshi: A friendlier `fork`\n\nPuma 5 added a new configuration switch: `nakayoshi_fork`. This switch affects Puma's behavior when\nforking new workers from the primary process. It is largely based on a [Ruby gem of the same name](https://github.com/ko1/nakayoshi_fork)\nbut adds new functionality. More specifically, enabling `nakayoshi_fork` in Puma will result in two additional\nsteps prior to forking into new workers:\n\n1. **Tenuring objects.** By running several minor garbage collection cycles ahead of a `fork`, Ruby can promote survivors\n   from the young to the old generation (referred to as \"tenuring\"). These objects are often classes, modules, or long-lived\n   constants that are unlikely to change.\n   This process makes forking copy-on-write friendly because tagging an object as \"old\" implies a write\n   to the underlying heap page. Doing this prior to forking means the OS won't have\n   to copy this page from the parent to the worker process later. We won't be discussing copy-on-write in detail but\n   [this blog post offers a good introduction to the topic and how it relates to Ruby and pre-fork servers](https://brandur.org/ruby-memory).\n\n1. 
**Heap compaction.** Ruby 2.7 added a new method `GC.compact`, which\n   will reorganize the Ruby heap to pack objects closer together when invoked. `GC.compact` reduces Ruby heap fragmentation and\n   potentially frees up Ruby heap pages so that the physical memory consumed can be reclaimed by the OS.\n   This step only happens when `GC.compact` is available in the version of Ruby that is in use (for MRI, 2.7 or newer).\n\nIn the remainder of this post, we will look at:\n\n* How `GC.compact` works and its potential benefits.\n* Why using C-extensions can be problematic when using compaction.\n* How we resolved a production incident that crashed GitLab.\n* What to look out for before enabling compaction in your app, via `nakayoshi_fork` or otherwise.\n\n## How compacting garbage collection works\n\nThe primary goal of a compacting garbage collector (GC) is to use allocated memory more\neffectively, which increases the likelihood of the application using less memory over time.\nCompaction is especially important when processes can share memory, as is the case with Ruby pre-fork\nservers such as Puma or Unicorn. But how does Ruby accomplish this?\n\nRuby manages its own object heap by allocating chunks of memory from the operating system called pages\n(a confusing term since Ruby heap pages are distinct from the smaller memory pages managed by the OS itself).\nWhen an application asks to create a new object, Ruby will try to find a free object slot in one of these\npages and fill it. As objects are allocated and deallocated over the lifetime of the application,\nthis can lead to fragmentation, with pages being neither entirely full nor entirely empty. 
This is the\nprimary cause for Ruby's infamous runaway memory problem: Since the available space isn't optimally used,\npages will rarely be entirely empty and become \"tomb pages\" which means it is necessary for the pages to be empty for them to be deallocated.\n\nRuby 2.7 added a new method, `GC.compact`, which aims to address this problem by walking the entire\nRuby heap space and moving objects around to obtain tightly packed pages. This process will ideally make\nsome pages unused, and unused memory can be reclaimed by the OS. [Watch this video from RubyConf 2019](https://www.youtube.com/watch?v=H8iWLoarTZc) where Aaron Patterson, the author of this feature, gave a good introduction to compacting GC.\n\nCompaction is a fairly expensive task since Ruby needs to stop-the-world for a complete heap reorganization so\nit's best to perform this task before forking a new worker process, which is why Puma 5 included this step when performing `nakayoshi_fork`. Moreover, running compaction before forking\ninto worker processes increases the chance of workers being able to share memory.\n\nWe were eager to enable this feature on GitLab to see if it would reduce memory consumption, but things didn't entirely go as planned.\n\n## Inside the incident\n\nAfter extensive testing via our automated performance test suite and in preproduction\nenvironments, we felt ready to explore compaction on production nodes. We kept a\n[detailed, public record of what happened\nduring this production incident](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/3370), but the key details are summarized below:\n\n* The deployment passed the canary stage, meaning workers who had their heaps compacted were serving traffic\n  successfully at this point.\n* Sometime during the full fleet rollout, problems emerged: Error rates started spiking but not\n  across the entire fleet. 
This phenomenon is odd because errors tend to spread across all workers due to load balancing.\n* The error messages surfacing in Sentry were mysterious at best:\n  `ActionView::Template::Error\nuninitialized constant #\u003CClass:#GrapePathHelpers::DecoratedRoute:0x00007f95f10ea5b8>::UNDERSCORE`. Remember this error message for later.\n* We discovered the affected workers were segfaulting in [`hamlit`](https://github.com/k0kubun/hamlit),\n  a high-performance HAML compiler. Hamlit uses a C-extension to achieve better performance. The segfaulting and the fact\n  that we were rolling out an optimization that touches GC-internal structures was a tell-tale sign that\n  compaction was likely to be the cause.\n* We rolled back the change to quickly recover from the outage.\n\n## How we diagnosed the problem\n\nWe were disappointed by this setback and wanted to understand why the outage occurred. Fortunately,\nRuby provides detailed stack traces when crashing in C-extensions. The most effective way\nto quickly analyze these is to look for transitions where a C-extension calls into the Ruby VM\nor vice versa. 
These lines therefore caught our attention:\n\n```shell\n...\n/opt/gitlab/embedded/lib/libruby.so.2.7(sigsegv+0x52) [0x7f9601adb932] signal.c:946\n/lib/x86_64-linux-gnu/libc.so.6(0x7f960154c4c0) [0x7f960154c4c0]\n/opt/gitlab/embedded/lib/libruby.so.2.7(rb_id_table_lookup+0x1) [0x7f9601b15e11] id_table.c:227\n/opt/gitlab/embedded/lib/libruby.so.2.7(rb_const_lookup+0x1e) [0x7f9601b4861e] variable.c:3357\n/opt/gitlab/embedded/lib/libruby.so.2.7(rb_const_get+0x39) [0x7f9601b4a049] variable.c:2339\n# ^--- Ruby VM functions\n/opt/gitlab/embedded/lib/ruby/gems/2.7.0/gems/hamlit-2.11.0/lib/hamlit/hamlit.so(str_underscore+0x16) [0x7f95ee3518f8] hamlit.c:17\n/opt/gitlab/embedded/lib/ruby/gems/2.7.0/gems/hamlit-2.11.0/lib/hamlit/hamlit.so(rb_hamlit_build_id) hamlit.c:100\n# ^-- hamlit C-extension\n...\n```\n\nThe topmost stack frame reveals the preceding calls led to a segmentation fault (`SIGSEGV`).\nWe highlighted the lines where Hamlit calls back into Ruby: In a function called `str_underscore` which\nwas called by `rb_hamlit_build_id`. The `rb_*` prefix tells us that this is a C-function we can call from Ruby,\nand indeed it is used by [`Hamlit::AttributeBuilder`](https://github.com/k0kubun/hamlit/blob/master/lib/hamlit/attribute_builder.rb) to construct DOM `id`s.\n\nBut we still don't know why it is crashing. Next, we need to inspect what happens in `str_underscore`.\nWe can see that this function performs a constant lookup on `mAttributeBuilder` – searching\nfor a constant called `UNDERSCORE`. When following the breadcrumbs it turns out to simply be the string `\"_\"`.\nIt is this lookup that failed.\n\nWait -- `UNDERSCORE`? That sounds familiar. Recall the top-level error messages:\n\n```\nActionView::Template::Error\nuninitialized constant #\u003CClass:#GrapePathHelpers::DecoratedRoute:0x00007f95f10ea5b8>::UNDERSCORE\n```\n\nBut `GrapePathHelpers` is clearly not a Hamlit class. 
Hamlit is trying to look up its own `UNDERSCORE`\nconstant on a class in the [`grape`](https://github.com/ruby-grape/grape) gem, an entirely different library\nthat is not involved in HTML rendering at all and there is no such constant defined on Grape's\n`DecoratedRoute` class either.\n\nNow the penny dropped – remember how compaction moves around objects in Ruby's heap space? Classes in\nRuby are objects too, so `GC.compact` must have moved a Grape class into an object slot that was previously\noccupied by a Hamlit class object, but Hamlit's C-extension never saw it coming!\n\n## How we solved the problem\n\nTo be clear, what happened above should _not_ happen with a well-behaved C-extension. Compaction\nwas developed carefully with support for C-extensions that predate Ruby 2.7, so all\nexisting Ruby gems would continue to operate normally.\n\nSo what went wrong? When a C-extension allocates Ruby objects, it must _mark_ them for as long as\nthey are alive. A marked object will not be garbage collected and because the Ruby GC cannot reason about objects\noutside of its own purview (i.e., objects created from Ruby code), it needs to rely on C-extensions\nto correctly mark and unmark objects themselves.\n\nNow comes the twist: Marked objects can be moved during compaction and existing C-extensions\ncan't cope with an object they hold pointers to suddenly move into a different slot.\nTherefore, Ruby 2.7 does something clever: It \"pins\" objects allocated with the mark function that existed prior\nto Ruby 2.7, meaning the pinned objects are not allowed to move during compaction. 
For new code, it introduces\na special mark-but-don't-pin function that will also allow an object to move, giving gem authors the\nopportunity to make their libraries compaction-aware.\n\nHamlit does not implement compaction support, so this could only mean one thing:\nHamlit wasn't even properly marking those objects, otherwise Ruby 2.7\nwould have automatically pinned them so they wouldn't move during compaction.\nAfter [discussing an attempted fix we submitted](https://github.com/k0kubun/hamlit/pull/171) but without\na reliable way to reproduce the issue for everyone, the Hamlit author decided to sidestep the\nproblem by [resolving those constants statically instead](https://github.com/k0kubun/hamlit/pull/172)\nand marking each via `rb_gc_register_mark_object`.\nThis change landed in [Hamlit 2.14.2](https://github.com/k0kubun/hamlit/blob/master/CHANGELOG.md#2142---2021-01-21)\nwhich we confirmed resolves the issue.\n\n## The next steps\n\nIt is exciting to see that the Ruby community is making progress on making Ruby a more memory-efficient\nlanguage but we learned that we need to step carefully when introducing such wide-reaching changes to a large\napplication like GitLab. It is difficult to investigate and fix problems that crash the Ruby VM, which is more likely for\nany library that uses C-extensions.\n\nTwo particular action items we took away from this were:\n\n1. **More reliable detection of compaction-related issues in CI.** We're not going to sugar-coat this:\n   We detected the problem late. Our comprehensive test suite was passing, our QA and performance tests\n   on staging environments passed, and the problem didn't even show up in canary deployments. Ideally, we\n   would have caught this issue with our automated test suite. 
One way to test whether compaction causes problems\n   is by using `GC.verify_compaction_references` – this is a rather crude tool because it requires\n   keeping two copies of the Ruby heap, which can be prohibitively expensive in terms of memory use. We\n   have therefore not yet decided how to approach this.\n1. **Improve our ability to roll out system configuration gradually.** Puma is part of our core infrastructure,\n   since it sits in the path of every web request, which makes it especially risky to experiment with Puma\n   configuration. GitLab already supports [feature flags](https://docs.gitlab.com/ee/development/feature_flags/index.html)\n   to allow developers to roll out product changes gradually, but it presents us with a catch-22 when\n   making changes at the infrastructure level, because to query the state of a feature flag, the infrastructure\n   needs to already be up and running. It would be ideal to have a similar mechanism for system configuration, [which we are currently exploring](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/154).\n\nWhile performance is a major focus for us at the moment it must not compromise availability.\nWe will continue to monitor developments in the Ruby community around compaction support, but decided to\nnot use it in production at this point in time since the gains don't appear to outweigh the risks.\n",[2396,1295,9],{"slug":5506,"featured":6,"template":680},"puma-nakayoshi-fork-and-compaction","content:en-us:blog:puma-nakayoshi-fork-and-compaction.yml","Puma Nakayoshi Fork And 
Compaction","en-us/blog/puma-nakayoshi-fork-and-compaction.yml","en-us/blog/puma-nakayoshi-fork-and-compaction",{"_path":5512,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5513,"content":5519,"config":5523,"_id":5525,"_type":14,"title":5526,"_source":16,"_file":5527,"_stem":5528,"_extension":19},"/en-us/blog/pyb-all-remote-mark-frein",{"title":5514,"description":5515,"ogTitle":5514,"ogDescription":5515,"noIndex":6,"ogImage":5516,"ogUrl":5517,"ogSiteName":667,"ogType":668,"canonicalUrls":5517,"schema":5518},"How being all-remote helps us practice our values at GitLab","GitLab CEO Sid Sijbrandij and Mark Frein of InVision talk about why all-remote is the future, and moving beyond 'But how do you know they're working?'","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680686/Blog/Hero%20Images/webcast-cover.png","https://about.gitlab.com/blog/pyb-all-remote-mark-frein","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How being all-remote helps us practice our values at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-31\"\n      }",{"title":5514,"description":5515,"authors":5520,"heroImage":5516,"date":5012,"body":5521,"category":808,"tags":5522},[672],"\n\nAll-remote workplaces like GitLab and InVision are disrupting the status quo by abandoning the office and creating a new model for the ideal workplace, and employees and employers are starting to catch on. 
GitLab CEO [Sid Sijbrandij](/company/team/#sytses) and [Mark Frein](https://www.linkedin.com/in/mark-frein-886148/), chief people officer at product design platform [InVision](https://www.invisionapp.com/), recently met to chat about the future of remote work, leadership in a distributed company, and the values that drive their work (and why [all-remote](/company/culture/all-remote/) isn’t one of them).\n\n## Build interpersonal relationships, digitally\n\nOn your first day at GitLab or InVision, you don’t walk up to the office, put on a smile, and find your desk. Instead, you sit on your desk chair, deck, or couch, open your laptop and connect using a suite of different technologies that provide a portal into your home.\n\n“I often say, ‘How often do you invite people into your home on day one when you're starting a new job?’” says Mark. “We are already inside your most personal space. We can see your bookcase, we can see things that are important to you, we can see your cat jumping on your lap, because animals always want to make sure they’re with you on important calls.”\n\nWhen a company empowers a distributed team to embrace the inevitable interruptions of doorbells ringing, phones buzzing, and demands from pets, children, and partners, you get to know your remote teammates better than if you shared an office. People are free to share more of themselves than if they were commuting from their homes to a common area.\n\nBy sharing your home, albeit digitally, with your colleagues, it is critical that your teammates show the same degree of humility and empathy for colleagues as they do for customers.\n\nAll-remote companies that are making hiring decisions ought to search for workers that are highly skilled in their areas of expertise, as well as in interpersonal communication. 
It is the active listeners, clear communicators, and willing collaborators that drive progress in all-remote companies, because these interpersonal skills allow teams to breach the digital divide and make lasting contributions to the company and product.\n\nLeadership in all-remote organizations must be similarly intentional. Managers do not have the benefit of serendipity at all-remote companies; instead, they must work harder to emotionally engage with the people they lead.\n\n## Technology is driving the all-remote movement\n\nThere are three primary communication channels that connect GitLab team members and InVision team members. “I think of our right and left hands as Zoom and Slack,” says Mark. At GitLab, we primarily use our own product, as well as Zoom and Slack to connect our distributed team.\n\nThe advent of these powerful communication tools is what helps all-remote companies like GitLab and InVision exist, and is a driving factor behind the movement for workplaces to go all-remote.\n\n“I think we're just at the beginning of this movement, and a lot of what's worked has been hacked together so far,” says Mark. “I think remote is going to last as long as the history of work, and it’s just in its infancy.”\n\nThinking back to 10 or 15 years ago, communication technologies first started being used in new and unique ways to mediate relationships. Mark points to the early days of online multiplayer game, World of Warcraft, as an example of serious all-remote gaming that helped condition us to using communication technology in collaborative ways. Just like WoW unlocked online massive multiplayer gaming, tools like Zoom unlock the potential of the all-remote workplace.\n\n## But wait, how do you know if they’re working?\n\nThere are many people from outside the all-remote world that remain incredulous about the idea of a distributed team. 
Both Sid and Mark are often asked the same questions about all-remote: \"How do you know that people are working?\"\n\n“I view these as old workplace, old economy questions,” says Mark. “Those are usually the least interesting questions.”\n\nThe framework that “work” is a lot of people in the building at the same time minimizes the focus on each individual contributor’s work product.\n\n“In many co-located companies, you can just show up and people will presume you’re working, but at GitLab we actually check your output and results,” says Sid.\n\nThere are also many people at co-located companies who will claim they value hiring the best people for the job, or that people are the heart of their organization, a statement largely incongruous with their practices, notes Sid.\n\n“You're saying people are the most important, but you limit your hiring to 1% of the world population? Then the people who are most important, you make them commute two hours of every day?” says Sid.\n\n## The drawbacks of part-remote\n\nIn response to the demand for greater flexibility in scheduling and workplaces, there are more co-located companies that are trying out remote teams or allowing a few remote work days a week or month. While this is generally a move in the right direction for greater employee autonomy, Mark and Sid have some skepticism about the effectiveness of this approach, because in each case there remains a single center of power.\n\n“I am still very much a skeptic around an organization that culturally is anchored in physicality bolting on remote capability,” says Mark. 
“I have not seen that work, which doesn't mean that it hasn't and I obviously haven't seen every organization out there, but in those cases there are still real stretches of culture and behaviors when it comes to the haves and have-nots and the people who are in the center.”\n\nThere is intentionally no headquarters for GitLab or InVision, because by creating a physical room where it happens, there are certain advantages for the team members in the room, and disadvantages for those that are not.\n\nHistorically, GitLab’s company robot named Beamy, lived in the San Francisco boardroom, which is in Sid’s home in the city. Beamy was created as an exercise in [transparency](https://handbook.gitlab.com/handbook/values/#transparency), so every GitLab team member can see for themselves that there is no secret headquarters where decisions are made. “I’m just working from home like everyone else,” says Sid.\n\n## All-remote isn’t a value\n\nThe fixtures of GitLab’s company culture are our [values](https://handbook.gitlab.com/handbook/values/): collaboration, results, efficiency, Diversity, Inclusion & Belonging, and transparency. Everything in the company flows from those values, and while being all-remote is a distinguishing feature to our company, like InVision, we don’t really consider it to be a core value.\n\nDuring this part of the discussion, Sid, who is one of the rare people who can stay fully engaged in a conversation while also multitasking, added a section to our handbook, “[What is not a value](https://handbook.gitlab.com/handbook/values/#what-is-not-a-value),” which reads:\n\n“All remote isn't a value. 
It is something we do because it helps us practice our values of transparency, efficiency, Diversity, Inclusion & Belonging, and results.”\n\nWatch the full conversation between Sid and Mark on GitLab Unfiltered.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/IFBj9KQSQXA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[2749,9,832],{"slug":5524,"featured":6,"template":680},"pyb-all-remote-mark-frein","content:en-us:blog:pyb-all-remote-mark-frein.yml","Pyb All Remote Mark Frein","en-us/blog/pyb-all-remote-mark-frein.yml","en-us/blog/pyb-all-remote-mark-frein",{"_path":5530,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5531,"content":5537,"config":5542,"_id":5544,"_type":14,"title":5545,"_source":16,"_file":5546,"_stem":5547,"_extension":19},"/en-us/blog/quantifying-ux-positioning-of-the-clone-button",{"title":5532,"description":5533,"ogTitle":5532,"ogDescription":5533,"noIndex":6,"ogImage":5534,"ogUrl":5535,"ogSiteName":667,"ogType":668,"canonicalUrls":5535,"schema":5536},"Quantifying UX: Positioning the clone button","We wanted to move the clone button on the project overview page. 
Here's how user testing helped us make the right choices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672622/Blog/Hero%20Images/positioning-clone-button.jpg","https://about.gitlab.com/blog/quantifying-ux-positioning-of-the-clone-button","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quantifying UX: Positioning the clone button\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2019-07-26\"\n      }",{"title":5532,"description":5533,"authors":5538,"heroImage":5534,"date":5539,"body":5540,"category":743,"tags":5541},[1897],"2019-07-26","\nWe recently redesigned GitLab's project overview page in an effort to make it easier to read. We wanted\nto make it simple for users to understand what the project is about and to get a quick overview of\nits status and activity. We considered moving the clone button further down the page,\nbut decided to put a smaller version in the header instead. The logic behind this decision:\n*Things further down the page are harder to find.*\n\n![GitLab's project overview before the most recent redesign](https://about.gitlab.com/images/blogimages/clone-button-positioning/01.jpg){: .medium.center}\n\nThe original project overview page. 
Lack of structure and an unclear information architecture were\n  two major problems.\n  {: .note.text-center}\n\nWe know one of the main things users want to do on the project overview page is *clone the project*.\nWe were already changing the UI so we would hide both clone URLs (HTTPS and SSH) behind a\ndedicated “clone” button, but we were concerned that change would have a negative\nimpact on the discoverability of the cloning options.\n\n![Redesigned project overview page](https://about.gitlab.com/images/blogimages/clone-button-positioning/02.jpg){: .medium.center}\n\nThe redesigned project overview page that is currently live.\n{: .note.text-center}\n\nWe received some negative feedback after the change but nothing that was too serious. The feedback was mostly about\nhaving to make an additional click to get to what the user wants. We concluded\nit was a compromise we could live with.\n\n## Moving the clone button\n\nBut after a while, we started receiving more feedback and suggestions\nto [move the clone button down to the file tree control area](https://gitlab.com/gitlab-org/gitlab-ce/issues/60022).\nThe initial suggestion was made because the recent redesign of the project overview page made\nthe clone button completely disappear from the repository page. Removing it from\nthe file tree section in one place removed it from all occurrences of this UI pattern.\n\n![New position for the clone button](https://about.gitlab.com/images/blogimages/clone-button-positioning/03.jpg){: .medium.center}\n\nThe proposal suggested we move the clone button down to the file tree controls.\n{: .note.text-center}\n\nI remembered the negative feedback we received for our most recent change so I wanted to\nmake our decision with some research. I quickly created a [UsabilityHub](https://usabilityhub.com) click test\nthat would tell us if the discoverability of the button worsened by moving it further down the page. 
The test was\nsimple: show the new design and ask the participants one\nquestion – *Where would you click to copy (and sync) this repository to your local machine?*\nOur UX research team helped me shape the question so that it wasn’t leading (we couldn’t use\nthe word “clone”). We would also run a control test with the live design – the one where\nthe clone button is in the header – so that we could have a baseline for comparison.\n\n![The click test](https://about.gitlab.com/images/blogimages/clone-button-positioning/click-test.gif){: .medium.center}\n\nThis is what solving a click test looked like.\n{: .note.text-center}\n\nAs I was working on the test, I thought it was going to further validate the recent change where\nwe moved the clone button to the header. It makes sense: If a dark blue button is on the\ntop right on a page, it’s easier to notice than if it’s further down or possibly below the fold.\nBut then I remembered that other Git platforms (most notably GitHub) have the clone button in the same\nplace we were considering. The test went live and I had no idea what to expect. We soon collected\naround 40 answers to each of the two variations and we felt that was enough to draw conclusions.\n\nThe results were surprising.\n\n![The results of the test](https://about.gitlab.com/images/blogimages/clone-button-positioning/04.jpg){: .medium.center}\n\nThe results of the new design on the left and the current one on the right.\n{: .note.text-center}\n\n| Version | Correct answers | Time required |\n| ------- | ---- | --------------|\n| New | 98%    | 15s         |\n| Current | 84%    | 21s         |\n\nAlmost all participants (98%) answered correctly in the new design compared to 84% in the current design.\nAnd in the new design it took them six seconds less to answer – 15 seconds instead of 21. So this means it\nmakes sense to move the clone button to the file tree controls and reintroduce it on the repository page.\nIt’s a win-win. No compromises there. 
But what can we do when the repository of a project\nis empty? We show different information on that page when a repository is empty and the layout of\nthe page is slightly different too.\n\n## Cloning an empty repository\n\nSo we solved one part of the problem and now it was time to solve the other part. When the\nrepository of a project is empty we show instructions on how to use it.\nCloning instructions are included as well but there’s no button in the cloning instructions or\nanywhere close. So far we didn’t really need one as we had one in the header.\n\n![Current empty repository page layout](https://about.gitlab.com/images/blogimages/clone-button-positioning/05.jpg){: .medium.center}\n\nCurrent empty repository project overview page.\n{: .note.text-center}\n\nBut moving that button down to the file tree controls now meant we wouldn’t have a button in\nthe header anymore. This same scenario applies to the empty repository too! So what should we do? What\nwould happen if we completely removed it?\n\n![Empty repository page without the clone button](https://about.gitlab.com/images/blogimages/clone-button-positioning/06.jpg){: .medium.center}\n\nEmpty repository project overview page without the clone button. Will removing\n  it have a profoundly negative effect on user experience?\n  {: .note.text-center}\n\nThis was another question we could answer with a quick test. I created two variations of the\ntest – one with the button in the header (current design) and one without it (new design). We would\nshow one of the variations to a participant and ask: *Where would you find the\ninformation for copying (and syncing) this repository to your local machine?*\n\nYou’re probably thinking the result of this test should be obvious – the variation\nwith the button should win. We were thinking that too, but we wanted to see what the difference was.\nWe wanted to quantify it so we could make an informed decision. 
If the results were really\nbad, we would consider adding a clone button to the instructions area. This solution felt a bit\nodd so we wanted to make sure it was the right thing to do.\n\n![Results of the second test](https://about.gitlab.com/images/blogimages/clone-button-positioning/07.jpg){: .medium.center}\n\nResults of the new design (without the button) on the left and the current design (with the button)\n  on the right.\n  {: .note.text-center}\n\nAnd yes, the results were what we expected. Just over three-quarters of users (77%) answered\ncorrectly in the current design and it took them 16 seconds. Removing the button altogether meant\nonly 50% of users found the cloning information and it took them 37 seconds. That’s 21 seconds longer!\nWe concluded removing the button had a very negative impact on user experience so we decided\nto introduce a clone button in the instructions area.\n\n| Version | Correct answers | Time required |\n| ------- | ---- | --------------|\n| New | 50%    | 37s         |\n| Current | 77%    | 16s         |\n\n![New design for the empty repository page](https://about.gitlab.com/images/blogimages/clone-button-positioning/08.jpg){: .medium.center}\n\nIn the end, we decided to add the clone button on top of the instructions sections, where\n  all other buttons already are.\n  {: .note.text-center}\n\nThe solution is [currently being implemented by a member of our awesome\ncommunity](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/27754) and we’re looking forward\nto seeing this change live!\n\nRead my previous [Quantifying UX blog post about redesigning GitLab's settings pages](/blog/quantifying-ux-validating-the-redesign-of-gitlabs-settings-pages/).\n\nCover image by [David Travis](https://unsplash.com/@dtravisphd?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: 
.note}\n",[9,700,1698],{"slug":5543,"featured":6,"template":680},"quantifying-ux-positioning-of-the-clone-button","content:en-us:blog:quantifying-ux-positioning-of-the-clone-button.yml","Quantifying Ux Positioning Of The Clone Button","en-us/blog/quantifying-ux-positioning-of-the-clone-button.yml","en-us/blog/quantifying-ux-positioning-of-the-clone-button",{"_path":5549,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5550,"content":5556,"config":5561,"_id":5563,"_type":14,"title":5564,"_source":16,"_file":5565,"_stem":5566,"_extension":19},"/en-us/blog/quantifying-ux-validating-the-redesign-of-gitlabs-settings-pages",{"title":5551,"description":5552,"ogTitle":5551,"ogDescription":5552,"noIndex":6,"ogImage":5553,"ogUrl":5554,"ogSiteName":667,"ogType":668,"canonicalUrls":5554,"schema":5555},"Quantifying UX: How we validated the redesign of GitLab's settings pages","A GitLab senior UX designer shares how we determined whether a recent redesign improved the overall experience for users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683361/Blog/Hero%20Images/user-testing-validating-redesign.jpg","https://about.gitlab.com/blog/quantifying-ux-validating-the-redesign-of-gitlabs-settings-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quantifying UX: How we validated the redesign of GitLab's settings pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2019-03-13\"\n      }",{"title":5551,"description":5552,"authors":5557,"heroImage":5553,"date":5558,"body":5559,"category":299,"tags":5560},[1897],"2019-03-13","\nThere are three main settings pages in GitLab: group settings, project settings, and admin settings. Shortly after I joined GitLab, the group settings page was redesigned to match a recent change that was implemented for the project settings, to “tidy up” all content into expandable sections. 
The idea was well intended, because these settings pages can be extremely long, full of diverse content and forms, and they’re very hard to read. It’s also difficult to find information when everything is simply “out there.”\n\nThe group and project settings pages were both redesigned in a short amount of time. Both are critical to using GitLab, which means that many users engage with them. This is great, because when that’s the case, we get lots of feedback after introducing changes. Unfortunately, in this case, the feedback was negative. [Users began to tell us that it was even harder to find the setting](https://gitlab.com/gitlab-org/gitlab-ce/issues/41230) they needed after the change was introduced. Instead of scrolling through the page and scanning it for relevant content, they now had to expand the sections and look for it there. The labels of these sections weren’t descriptive, so they often had to resort to guessing.\n\n![GitLab's project settings page](https://about.gitlab.com/images/blogimages/validate-redesign/project-settings.jpg){: .large.center}\n\n## Improvements to the settings pages\n\nI came up with some somewhat basic changes that could lead to significant improvements. In the issue titled [Improve settings pages design by prioritizing content: Discovery](https://gitlab.com/gitlab-org/gitlab-ce/issues/47405) I suggested we:\n* Prioritize the content by following the 80/20 principle (what do most users look for on these pages?).\n* Improve the labels for the expandable sections by making them descriptive.\n* Make the titles clickable (instead of just having the “expand/collapse” button) and\n* Shift content around if needed.\n\nThe 80/20 principle, also known as the [Pareto principle](https://en.wikipedia.org/wiki/Pareto_principle), suggests that 80 percent of effects come from 20 percent of causes. Further research suggests that this principle can be commonly observed in pretty much anything. 
So, in our case, applying the principle means: Can we prioritize the 20 percent of content that 80 percent of users look for?\n\nThis meant that we needed to rethink the information architecture (IA) of the page. If we introduce a section with prioritized content, as suggested in the improvements above and illustrated below, could we take some of the content that is commonly searched for and move it into that section?\n\n![Project settings page redesign concept](https://about.gitlab.com/images/blogimages/validate-redesign/redesign-concept.jpg){: .large.center}\n\nSoon after the discovery issue in milestone 11.2, I came up with a redesign that would accomplish all of the above. We started with the Group settings because it’s the simplest settings page, with the least amount of content. It took us longer than originally anticipated to implement the changes, and we shipped in 11.5, a little under three months later.\n\n![Redesigned project settings page](https://about.gitlab.com/images/blogimages/validate-redesign/group-settings-redesigned.jpg){: .large.center}\n\n## Some thoughts on designers conducting their own UX research\n\nIdeally, I would have done some UX research/validation before implementation to see if the new designs are actually better. But in this case, the changes were mostly general best practices in terms of UI design and information architecture, so I was confident that they were all going to result in improvements.\n\nBut I wanted to quantify the results and confirm whether they were actually better, and if so, by how much? Confidence in design is good (and even required sometimes), but we should never replace measurement of results with it. 
Besides, the group settings redesign was a pilot: if all turned out well, we would redesign project settings and admin settings in a similar fashion, so I wanted to be 100 percent sure and ran the test.\n\nIn addition, the UX department at GitLab has been striving to get into a position where designers can conduct their own UX research. We want designers to conduct research in a quick way that allows them to get the results they need to move forward. This can be done with some guidance from the UX research department, but it is not necessary for them to always be 100 percent involved.\n\n### Why should designers do their own research?\n\nIn this particular example, the validation was done after the implementation of the redesign, but ideally, this type of research would be done before a single line of code was written. Even sooner, it can be done on the same day that the designer mocked up the UI solution. The greatest benefit of doing this is that it eliminates waiting and speeds up the cycle of feedback. A lot. Instead of waiting for weeks for something to get implemented, a designer creates a test by themselves, coordinates with UX research, gets participants to solve the test, and analyzes the results – all in the same day.\n\n## How do we validate UI design and IA changes?\n\nIn this case, the redesign introduced mostly UI and information architecture (IA) changes. How do you test these kinds of changes, especially when you work remotely? The answer is surprisingly simple: Create two “click tests” on [Usability Hub](https://usabilityhub.com/): One for the design of the page as it is now (original) and one for the redesign. Most users complained that they didn’t know which section contained the item they were looking for. This was the most important problem that needed to be solved, so I came up with a simple test: show the participants the design (either original or the redesign) and ask them questions which they answered by clicking on a design. 
For example, they would see the following (the redesign):\n\n![Redesign of settings pages](https://about.gitlab.com/images/blogimages/validate-redesign/test-redesign.jpg){: .medium.center}\n\nAnd they would answer the following questions:\n\n* Where do you think you can change who can see the details of this group?\n* Where do you think you can add an extra layer of security for signing in?\n* Where do you think you can change the URL of this group?\n\nEach of these three questions were followed up by two additional ones:\n\n* How easy/difficult was it to find?\n* How confident are you that the setting is in the section you selected?\n\n![Test redesign follow up](https://about.gitlab.com/images/blogimages/validate-redesign/test-redesign-followup.jpg){: .medium.center}\n\nThe participants responded with a rating of 1 to 5 for each of the follow-up questions. With the main questions, we measured the time required to answer (click) and whether the answer was correct or not. The follow-up questions helped us measure perceived difficulty and confidence.\n\n### Assumptions to validate\n\nWe wanted to validate the following assumptions:\n\n| Assumption | Validated/invalidated |\n| ------ | ------ |\n| Users will need less time to find the settings | ✅ / ❌   |\n| A greater number of users will click on the correct areas | ✅ / ❌  |\n| Users will be more confident in their section choices (new compared to old) | ✅ / ❌  |\n| The perceived difficulty of the tasks will improve | ✅ / ❌ |\n\nWe decided that if three out of four of those assumptions were validated we would consider the redesign a success. 
You can preview the tests at the following links (feel free to complete them, but they’re not collecting results anymore):\n* [Original](https://app.usabilityhub.com/preview/87c510cf7078)\n* [Redesign](https://app.usabilityhub.com/preview/fc581c732b7e)\n\n## Results\n\nWe shared our tests on Twitter and with [GitLab First Look](/community/gitlab-first-look/), our UX Research mailing list. We received more than 600 responses, and the results were evenly distributed between the original versus the redesign. The findings weren’t really surprising, but they validated our redesigns. We knew our work improved the experience of our users and we could now apply a similar approach to the other settings pages.\n\n| Version | Task | Time required | Correct answers | Confidence (mean)* | Perceived difficulty (mean)* |\n| ------- | ---- | --------------| ----------------|-------------------|-----------------------------|\n| Original| 1    | 19.4s         | 77%             | 3.6               | 2.1                         |\n| Redesign| 1    | 25.9s         | 78%             | 4.1               | 1.9                         |\n| Original| 2    | 14.6s         | 34%             | 3.2               | 2.4                         |\n| Redesign| 2    | 8.7s          | 97%             | 4.1               | 1.9                         |\n| Original| 3    | 6.4s          | 49%             | 3.9               | 1.9                         |\n| Redesign| 3    | 16.1s         | 92%             | 3.7               | 2.5                         |\n\n*Confidence: higher is better*\n\n*Perceived difficulty: lower is better*\n\n*I only counted the correct answers for confidence and perceived difficulty.\n\nOriginal test: 389 participants — [Results](https://app.usabilityhub.com/tests/87c510cf7078/results/e20614040355) \u003Cbr>\nRedesign test: 266 participants — [Results](https://app.usabilityhub.com/tests/fc581c732b7e/results/b016819adc5a)\n\n![Results 
heatmap](https://about.gitlab.com/images/blogimages/validate-redesign/results-heatmap.jpg){: .shadow.medium.center}\n\n*\u003Csmall>The heatmap feature in Usability Hub allowed us to see that the majority of users were clicking in the correct area, so they were finding what they were looking for.\u003C/small>*\n\nBy running such tests, we now have data that can help us quantify the user’s experience – in other words, we can measure the design’s impact. It took some users longer to find what they were looking for in the redesign, but their confidence in the correctness of their answer improved and the tasks were also perceived as less difficult.\n\nMost encouraging was the huge difference in how many respondents answered correctly compared to the original. We saw an increase from 34 to 97 percent in the second question and 49 to 92 percent in the third question, which proved that the redesign solves the problem that most users complained about: finding things.\n\nIf we look back to our assumptions, we validated three out of four, fulfilling the success criteria that we established at the start. The only assumption that wasn’t validated was that \"Users will need less time to find the settings.\" It took the participants longer to answer two out of the three questions.\n\n| Assumption | Validated/invalidated |\n| ------ | ------ |\n| Users will need less time to find the settings | ❌   |\n| A greater number of users will click on the correct areas | ✅  |\n| Users will be more confident in their section choices (new compared to old) | ✅  |\n| The perceived difficulty of the tasks will improve | ✅ |\n\n## What’s next?\n\nWe want to continue building on this success and improve all settings pages. Unfortunately, the project settings redesign did not make it into 11.7, but we are hopeful it will be included in one of the next few releases. 
We will then proceed to improve the other settings pages, as well as other improvements, such as [adding inline search](https://gitlab.com/gitlab-org/gitlab-ce/issues/50145). You can follow our progress through the [Improve and align settings pages UX](https://gitlab.com/groups/gitlab-org/-/epics/196) epic.\n\nAs we move forward, we want to do more of this kind of validation/research. We want to come to a place where designers have enough time and confidence in doing their own UX research and do it before implementation starts, in a single milestone, so we can keep moving fast and shipping more awesome things. If you have UX research skills and experience and want to work at GitLab, [check out our Careers page](/jobs/).\n\nYou can also read more about [how we conduct remote UX research at GitLab](/blog/conducting-remote-ux-research/).\n\nCover image by [Alvaro Reyes](https://unsplash.com/photos/qWwpHwip31M?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/user-test?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)",[9,700,1698],{"slug":5562,"featured":6,"template":680},"quantifying-ux-validating-the-redesign-of-gitlabs-settings-pages","content:en-us:blog:quantifying-ux-validating-the-redesign-of-gitlabs-settings-pages.yml","Quantifying Ux Validating The Redesign Of Gitlabs Settings Pages","en-us/blog/quantifying-ux-validating-the-redesign-of-gitlabs-settings-pages.yml","en-us/blog/quantifying-ux-validating-the-redesign-of-gitlabs-settings-pages",{"_path":5568,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5569,"content":5575,"config":5580,"_id":5582,"_type":14,"title":5583,"_source":16,"_file":5584,"_stem":5585,"_extension":19},"/en-us/blog/questions-regarding-our-zero-trust-efforts",{"title":5570,"description":5571,"ogTitle":5570,"ogDescription":5571,"noIndex":6,"ogImage":5572,"ogUrl":5573,"ogSiteName":667,"ogType":668,"canonicalUrls":5573,"schema":5574},"We answer your most 
popular questions about our Zero Trust journey","From why we chose Okta to issues around data fluidity, here are answers to your most-asked ZT questions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681098/Blog/Hero%20Images/lysander-yuen-wk-ztn-unsplash.jpg","https://about.gitlab.com/blog/questions-regarding-our-zero-trust-efforts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We answer your most popular questions about our Zero Trust journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2020-02-19\"\n      }",{"title":5570,"description":5571,"authors":5576,"heroImage":5572,"date":5577,"body":5578,"category":720,"tags":5579},[1574],"2020-02-19","\n\n_It’s been a busy few months since my last blog post on our Zero Trust efforts, [\"Zero Trust at GitLab: Where do we go from here?\"](/blog/zero-trust-at-gitlab-where-do-we-go-from-here/). Since then I’ve done a few [press interviews](https://www.digi.no/artikler/zero-trust-du-ikke-kan-basere-deg-bare-pa-en-leverandor-for-a-lose-det/484170) and spoken at security conferences (most recently at [ShmooCon 2020](https://www.shmoocon.org/speakers/#0trust)) on the topic of Zero Trust. I’ve been transparent about GitLab’s implementation of security and our pursuit of Zero Trust ideas. I received many questions about Zero Trust at ShmooCon, both at the end of the talk and in the hallways after. I thought I’d pass on a few of those questions with some answers since many people are interested in the actual implementation of the ideas. 
It’s also a good way to show what happens when a well-meaning concept meets harsh reality._\n\nWarning: Video contains some strong language\n{: .note}\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/vI7_M04qpJ4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nI discussed data classification challenges and specifically the fluidity of data in my ShmooCon talk and was met with a lot of hallway questions. More than one person asked for an example and wanted my opinion on how to classify the data. Hence this far-reaching question:\n\n### What do you mean that there are issues with the \"fluidity\" of data when it comes to data classification?\n\nIn an earlier blog post I did give an example of the fluidity of data, specifically when I talked about the movement of data in a section called (appropriately) [\"Movement of data\"](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/). Data fluidity is an issue because access to data is usually defined and enforced at the time of authentication. Waiting until the authentication stage causes problems if the data stays stationary while its classification changes rapidly. If you authenticate in the morning and have access to YELLOW data, but that data’s classification changes to RED in the afternoon, how do we enforce access controls if the user is only allowed access to YELLOW data? It is possible that you could move the data to a location intended for RED access only, or assign a user some type of access token specific to data based upon that data’s classification. But, you will need a solution that will scale and will additionally need to worry about whether data is being cloned, copied or archived. 
This is a question where it is much easier to explain the issue than solve it, and we are still looking for answers.\n\nThe concept of zones is a stop-gap until a better solution can be implemented. A data zone is comprised of data of different classifications, but with a single allow/deny method of access control. In other words, a data zone that contains both ORANGE and YELLOW data is ranked as an ORANGE ZONE since that is the highest level of data contained within it. Since we cannot specify granular access to the ORANGE ZONE resource, someone with YELLOW access cannot access the YELLOW data inside the ORANGE ZONE. The goal is to eliminate the zones so that we can define granular access to data. Most of the zones are set up to accommodate legacy systems into the data classification scheme and need to eventually be eliminated. This is, of course, a common problem in information technology – how do we move off of old systems onto new systems without disrupting existing processes and procedures. GitLab is very fortunate in that we have very few data zones compared to most companies, but it is always a problem when we encounter them.\n\nThe more advanced problem is that most technologies assign authorization to access data based upon the moment of initial user authentication. We want to eliminate data zones and we want to eliminate complexity. Making copies of data and storing a YELLOW version in one place and a RED version in another complicates things. Using an automated process that allows a non-privileged user to see privileged data also complicates things. The good news is that we are far enough in the Zero Trust process that we are dealing with this challenge. The bad news is we don’t have an answer yet but we’re still searching for something that works.\n\n\nWe get questions about our choice of vendors, mainly our choice of Okta as a major vendor for Zero Trust. 
Most organizations find it difficult to accept an approach where there is little or no competition in certain arenas, and in hallway conversations, people seemed alarmed that they’d be putting all of their digital eggs into a single digital basket. Some people have asked for an explanation as to why we are putting all of our end user identity in one basket:\n\n### You’re using Okta, what other tools did you look at? What didn’t meet your criteria?\n\nWe were looking for an [identity-management system](https://en.wikipedia.org/wiki/Identity-management_system) (IMS) that allows us to positively identify users during the authentication process. The IMS needed to have multi-factor authentication (MFA) capability and be able to support a lot of SaaS products. Okta gave us this and had a lot more features we’ve since started using. We also looked at products that mainly did MFA, but it was meeting those critical items along with a lot of extras we could take advantage of that clinched it.\n\nThe flexibility of Okta and the ability to implement something more than one way based upon user need was an unexpected benefit – MFA is an example. Some of our team members agreed to use U2F in the form of Yubikeys. This worked great, although some team members expressed concerns about possibly losing the keys or worried about the risk of leaving a low profile Yubikey plugged in all the time in case the entire laptop was lost or stolen. Since Okta’s MFA solutions also included the Okta Verify phone app that supported \"push\" technology, we could allow team members to have a choice in MFA methods. Team members could use the Yubikey or the push technology based upon what best suited their workflow, and we were able to get MFA implemented with team members actually using it. 
Allowing us to give team members a choice instead of simply forcing a method upon them leads to a happier adoption process, quicker overall implementation, and of course, a more secure work environment.\n\nMost vendors don’t offer the level of flexibility Okta does with their products or allow for that level of granularity with features when it comes to identity management, so there really were not a lot of other choices. Add in support for provisioning and de-provisioning for dozens of SaaS applications and it was obvious we’d get a great ROI.\n\n### How do you separate the hype from the fact when looking at Zero Trust?\n\nFirst off, for our implementation, we just identified what we wanted out of a security system that granted access to users, systems, and data. You can’t just say \"we want Zero Trust\" because every vendor claims to sell Zero Trust solutions. We used the [BeyondCorp paper](https://cloud.google.com/beyondcorp/) as an example of Google doing something for themselves, and not as a blueprint for us. We just looked for products that met our \"must-have\" list, and if it had a lot of \"nice to haves\" available that was great. It was even better if it had useful features we hadn’t even considered. So we ended up with Okta as a cornerstone for user identity and authentication, and now all products need to speak Okta, or at least support the protocols that Okta supports. That makes it easy, or at least easier to make things work together if we define a common bit of criteria - every solution must tie into Okta.\n\nThe hard part is that user identity and authentication is only one part of the picture. We need to do end-user device identity and authentication. We need to assign identity to running processes, including those kicked off by users, and those fully automated and triggered by events. And, getting into non-Zero Trust territory but still very much in line with our goals, we want to be able to audit all of our controls. 
We want to be able to log everything and search those logs for anomalies. Therefore we have to make sure that any Zero Trust solution can support auditing and logging.\n\n\n**What do you want to know? Do you have your own questions? Let us know!**\nWe’re still moving forward as our Zero Trust implementation is a work in progress. As we hit milestones, we will continue to update you with new blogs with hopefully new solutions and processes that work. Right now we’re deploying a solution for SSH by using Okta ASA, and we’re still tackling our asset management, so expect news from those fronts in upcoming blog posts!\n\n\n\nCover image by [Lysander Yuen](https://unsplash.com/@lysanderyuen?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com).\n{: .note}\n",[9,720,2057],{"slug":5581,"featured":6,"template":680},"questions-regarding-our-zero-trust-efforts","content:en-us:blog:questions-regarding-our-zero-trust-efforts.yml","Questions Regarding Our Zero Trust Efforts","en-us/blog/questions-regarding-our-zero-trust-efforts.yml","en-us/blog/questions-regarding-our-zero-trust-efforts",{"_path":5587,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5588,"content":5594,"config":5599,"_id":5601,"_type":14,"title":5602,"_source":16,"_file":5603,"_stem":5604,"_extension":19},"/en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses",{"title":5589,"description":5590,"ogTitle":5589,"ogDescription":5590,"noIndex":6,"ogImage":5591,"ogUrl":5592,"ogSiteName":667,"ogType":668,"canonicalUrls":5592,"schema":5593},"RAIL-M is an imperfectly good start for AI model licenses","\"GitLab, Inc. is dedicated to open source and AI. 
This is our take on a model license relevant to open source and AI communities: the BigScience Open RAIL-M license.\"","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671836/Blog/Hero%20Images/railmimage.jpg","https://about.gitlab.com/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"RAIL-M is an imperfectly good start for AI model licenses\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robin Schulman\"}],\n        \"datePublished\": \"2023-07-25\",\n      }",{"title":5589,"description":5590,"authors":5595,"heroImage":5591,"date":5596,"body":5597,"category":1839,"tags":5598},[4412],"2023-07-25","GitLab, Inc. is dedicated to open source – we believe in it, use it, and give back to it, and we have an [open core](https://about.gitlab.com/company/stewardship/) business model. We also care deeply about artificial intelligence (AI) – we recently announced that we are investing heavily in AI by [infusing it into every phase of our comprehensive DevSecOps platform](https://about.gitlab.com/solutions/ai/).\n\nWe were thus very interested to see [Responsible AI Licenses'](https://www.licenses.ai/) recent release of a model license relevant to both the open source and AI communities: the BigScience Open RAIL-M license ([RAIL-M](https://www.licenses.ai/blog/2022/8/26/bigscience-open-rail-m-license)).\n\nWe see RAIL-M as an exciting but flawed development in the AI model licensing space. 
Its authors’ intentions are admirable and important, but in practical terms, RAIL-M still has room for improvement.\n\n### What is RAIL-M?\nRAIL-M is part of the Open Responsible AI Licenses (Open RAIL) [family](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), which is a collection of AI licenses that aim to promote responsible use by imposing behavioral use restrictions on the model’s licensees and downstream users. The Open RAIL family is not alone – it’s within a new wave of licenses (see, for example, the [TII Falcon LLM License](https://huggingface.co/tiiuae/falcon-40b/blob/main/LICENSE.txt)) spawned by the public’s recent interest in AI. RAIL-M specifically applies these use restrictions to the model (the “M” in RAIL-M stands for “model”).\n\nThis blog discusses RAIL-M specifically, and does not consider the other licenses in the Open RAIL family.\n\n### RAIL-M puts ethics at the forefront\nIn recent months, much ink has been spilled over the novel ethical dilemmas that AI presents. Technologists, journalists, and companies alike have sounded the alarm on the various societal harms that AI could exacerbate (see, for example, OpenAI, Google Deepmind, and other AI companies’ [recent open letter](https://www.nytimes.com/2023/05/30/technology/ai-threat-warning.html) declaring that AI poses a “risk of extinction”). Regulators are taking notice. Recently, OpenAI’s CEO Sam Altman testified in a Senate hearing on AI’s risks, and a key committee of European Parliament lawmakers [approved the EU AI Act](https://www.europarl.europa.eu/news/en/press-room/20230505IPR84904/ai-act-a-step-closer-to-the-first-rules-on-artificial-intelligence), which aims to mitigate AI’s potential harms.\n\nRAIL-M places these ethical considerations front and center. 
Its restrictions prohibit the AI model’s licensees and their downstream users from engaging in potentially harmful uses such as applying the model in a way that violates applicable law, to provide medical advice, or to harass or defraud others.\n\nThese provisions’ practical implications are, admittedly, still a bit unclear. Regardless, these use-based restrictions will, at the very least, deter some from applying the model in harmful ways, and help push ethical considerations to the forefront of today’s fast-paced AI landscape. In the words of the Organisation for Economic Co-operation and Development ([OECD](https://oecd.ai/en/catalogue/tools/bigscience-openrail-m-license)): “OpenRAILs are a vehicle towards the consolidation of an informed and respectful culture of sharing AI artifacts acknowledging their limitations and the values held by the licensors of the model.”\n\n### In practice, RAIL-M isn’t perfect\nFirst, describing RAIL-M as an “open” license – as RAIL-M’s authors have in its title – is misleading. RAIL-M’s authors conflate royalty-free access and flexible use and re-distribution with truly “open” licenses. The Open Source Initiative ([OSI](https://opensource.org/osd/)) defines “open source” as software that, among other qualities, “must not restrict anyone from making use of the program in a specific field of endeavor.” RAIL-M’s use-based restrictions – which include prohibitions on providing medical advice, and generating information to be used for the administration of justice or law enforcement – prevent it from being a truly “open” license.\n\nSecond, regulators such as those in the EU will likely pass laws imposing certain use restrictions on AI tools in the near future. 
RAIL-M doesn’t cover how its own use-based requirements will interact with AI-related laws, which may present an issue if, for example, a RAIL-M restriction conflicts with one of these new regulations.\n\nFinally, commentators, including [Kyle Mitchell](https://writing.kemitchell.com/2023/01/26/Open-RAIL-M-Unclear) and [Luis Villa](https://blog.tidelift.com/evaluating-the-rail-license-family), have also expressed concerns that some of RAIL-M’s requirements may be too vague to comply with.\n\n### A net benefit to the AI community\nRAIL-M isn’t perfect. However, setting aside its practical flaws, RAIL-M’s release is still an important signal both to and from the AI community that AI ethics matter and must be considered even (and perhaps especially) when offering free, publicly-available models. To again quote [the OECD](https://oecd.ai/en/catalogue/tools/bigscience-openrail-m-license): “[l]icenses [like those in the Open RAIL family] … should not be conceived as burdensome legal technical mechanisms, but rather as a communication instrument among AI communities bringing stakeholders together by sharing common messages on how the licensed artifact can be used.”\n\nRAIL-M, and the Open RAIL family as a whole, will likely encourage the AI community – both AI model maintainers and perhaps even proprietary model creators – to consider, and work to mitigate, their models’ potential harms and abuses. 
We’ll be interested to see where it goes.\n\n_Cover image by [Google DeepMind](https://unsplash.com/@deepmind?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/ZJKE4XVlKIA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[1299,9],{"slug":5600,"featured":6,"template":680},"rail-m-is-an-imperfectly-good-start-for-ai-model-licenses","content:en-us:blog:rail-m-is-an-imperfectly-good-start-for-ai-model-licenses.yml","Rail M Is An Imperfectly Good Start For Ai Model Licenses","en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses.yml","en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses",{"_path":5606,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5607,"content":5612,"config":5618,"_id":5620,"_type":14,"title":5621,"_source":16,"_file":5622,"_stem":5623,"_extension":19},"/en-us/blog/reconfigure-inbound-email-for-gitlab-notification",{"title":5608,"description":5609,"ogTitle":5608,"ogDescription":5609,"noIndex":6,"ogImage":2010,"ogUrl":5610,"ogSiteName":667,"ogType":668,"canonicalUrls":5610,"schema":5611},"GitLab inbound email issue notification","We've identified a potential risk impacting those using our email an issue to project, Reply by Email, and Service Desk features.","https://about.gitlab.com/blog/reconfigure-inbound-email-for-gitlab-notification","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab inbound email issue notification\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jim Thavisouk\"}],\n        \"datePublished\": \"2018-03-06\"\n      }",{"title":5608,"description":5609,"authors":5613,"heroImage":2010,"date":5615,"body":5616,"category":675,"tags":5617},[5614],"Jim Thavisouk","2018-03-06","\n\nGitLab.com provides users the capability to [create new issues via 
email](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#new-issue-via-email), which can also be managed by [Service Desk](https://docs.gitlab.com/ee/user/project/service_desk.html). This is accomplished through a dynamically generated email address that is currently being managed with GitLab's domain name (@gitlab.com). It has come to our attention that an attacker can abuse this process to perform actions outside the intended scope with the @gitlab.com domain. This issue impacts users who are using email an issue to project, [Reply by Email](https://docs.gitlab.com/ee/administration/reply_by_email.html), and Service Desk.\n\n\u003C!-- more -->\n\n## Customer remediation steps\n\nOur users should check to see if they are using the create new issues via email feature.\n\nIf aliases were used, update those aliases from `@gitlab.com` to `@incoming.gitlab.com`.\n\nIf domain whitelisting was used, please update those domains from `@gitlab.com` to `@incoming.gitlab.com`.\n\nThese changes can be made _immediately_.\n\n## GitLab remediation strategy\n\nWe will update the addresses from `@gitlab.com` to `@incoming.gitlab.com`.\n\nWe will reach out to users directly that are still using the old address to make sure the new addresses are being used instead, by **April 17, 2018**.\n\nAll addresses with the @gitlab.com domain will be disabled **April 30, 2018**. 
Incoming email to the address will be rejected.\n",[720,9],{"slug":5619,"featured":6,"template":680},"reconfigure-inbound-email-for-gitlab-notification","content:en-us:blog:reconfigure-inbound-email-for-gitlab-notification.yml","Reconfigure Inbound Email For Gitlab Notification","en-us/blog/reconfigure-inbound-email-for-gitlab-notification.yml","en-us/blog/reconfigure-inbound-email-for-gitlab-notification",{"_path":5625,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5626,"content":5632,"config":5638,"_id":5640,"_type":14,"title":5641,"_source":16,"_file":5642,"_stem":5643,"_extension":19},"/en-us/blog/reducing-time-to-payout-and-launching-a-bug-bounty-anniversary-contest",{"title":5627,"description":5628,"ogTitle":5627,"ogDescription":5628,"noIndex":6,"ogImage":5629,"ogUrl":5630,"ogSiteName":667,"ogType":668,"canonicalUrls":5630,"schema":5631},"Why we're reducing the time to payout and launching a bug bounty anniversary contest","You talked. We listened. Quicker bug bounty payouts and we're holding a contest for our hackers!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678574/Blog/Hero%20Images/art-backlight-blur-249203.jpg","https://about.gitlab.com/blog/reducing-time-to-payout-and-launching-a-bug-bounty-anniversary-contest","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we're reducing the time to payout and launching a bug bounty anniversary contest\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dennis Appelt\"}],\n        \"datePublished\": \"2019-09-24\"\n      }",{"title":5627,"description":5628,"authors":5633,"heroImage":5629,"date":5635,"body":5636,"category":720,"tags":5637},[5634],"Dennis Appelt","2019-09-24","\nIn just nine months since [going public with our bug bounty program](/blog/gitlab-hackerone-bug-bounty-program-is-public-today/), our reporter community has made substantial contributions to the security and continued 
success of GitLab. Since going public, our community of external security researchers submitted 1016 reports and we paid out [$395,000 in bounties](https://hackerone.com/gitlab).\n\nWe are very grateful for your contributions and have an open line for feedback regarding our bug bounty program.\n\n## You talked, we listened\n\nIn fact, when we asked you how we could strengthen our bug bounty program, one of the top suggestions was to reduce the time to bounty payout. We’re sure both professional and casual bug bounty hunters enjoy receiving a paycheck earlier than later. So, we took your feedback and sat down to improve our program.\n\nGoing forward, we will pay out a part of the bounty right at the moment when a report is triaged, which is, on average, five days after the report is submitted. That means cash in your pocket faster. Reports with severity of medium, high, or critical will be awarded $1000 when the report is triaged. The remainder will be paid when the report is resolved.\n\nAt GitLab, we believe in the value of [iteration](https://handbook.gitlab.com/handbook/values/#iteration). Paying out a partial bounty when the report is triaged is the first in a series of steps to speed up bounty payouts. We have many more ideas on how we can speed up bounty payouts and we’d like to move toward this with our community. If you have feedback regarding faster bounty payouts – or other areas where we can improve or grow – please share it with us! It’s this continual feedback loop and collaboration that will make us all successful.\n\n## Repeat reporters\nAnother key element that strengthens our program is our repeat reporters. 
We went to the 2019 HackerOne H1-702 event where we met with our top three hackers (since our bug bounty program launch through June 2019) to recognize their accomplishments and thank them for their impact on our program.\n\n![ngalog](https://about.gitlab.com/images/blogimages/h1-sept24/ngalog1.jpeg){: .shadow.small.center}\nOur AppSec team with [ngalog](https://hackerone.com/ngalog) at HackerOne’s H1-702 event.\n{: .note.text-center}\n\n![jobert](https://about.gitlab.com/images/blogimages/h1-sept24/Jobert1.jpeg){: .shadow.small.center}\nOur AppSec team with [jobert](https://hackerone.com/jobert) at HackerOne’s H1-702 event.\n{: .note.text-center}\n\n![fransrosen](https://about.gitlab.com/images/blogimages/h1-sept24/fransrosen1.jpeg){: .shadow.small.center}\nOur AppSec team with [fransrosen](https://hackerone.com/fransrosen) at HackerOne’s H1-702 event.\n{: .note.text-center}\n\nGitLab’s mission is, [everyone can contribute](/company/mission/#mission). Not just the most experienced hackers, and not just the reporters finding the greatest quantity of bugs or even the most impactful bugs, but all of the reporters in between. Your findings make us stronger.\n\n**So, with that in mind, let us introduce our...**\n\n## \u003Ci class=\"fab fa-gitlab fa-fw\" style=\"color:rgb(252,109,38); font-size:.99em\" aria-hidden=\"true\">\u003C/i>  \u003Ci class=\"fas fa-birthday-cake\" style=\"color:rgb(107,79,187); font-size:.99em\" aria-hidden=\"true\">\u003C/i>  One-year anniversary hacking contest \u003Ci class=\"fab fa-gitlab fa-fw\" style=\"color:rgb(107,79,187); font-size:.99em\" aria-hidden=\"true\">\u003C/i>  \u003Ci class=\"fas fa-bug\" style=\"color:rgb(252,109,38); font-size:.99em\" aria-hidden=\"true\">\u003C/i>\n{: .text-center}\n\nOur [one year anniversary](/blog/gitlab-hackerone-bug-bounty-program-is-public-today/) of taking our bug bounty program public is right around the corner. 
To celebrate a very successful first year, we want to recognize the outstanding contributions from our reporter community with a little something special.\n\n**We are running a community hacking contest starting October 1 (12 am ET) until November 30, 2019 (12 pm ET).** The top contributor in the following categories will receive a special reward:\n\n\u003Ci class=\"fas fa-address-card fa-fw\" style=\"color:rgb(46,46,46); font-size:.90em\" aria-hidden=\"true\">\u003C/i> **Most reputation points from submissions to our program.** This category is simple. Collect the most reputation points from submissions to our program and win!\n{: #id-card-black}\n\n\u003Ci class=\"far fa-address-card fa-fw\" style=\"color:rgb(56,13,117); font-size:.90em\" aria-hidden=\"true\">\u003C/i> **Most reputation points *collected by a reporter new to our program***. Getting started with a new bug bounty program is difficult. We want to recognize the effort you put in.\n{: #id-card-purple}\n\n\u003Ci class=\"fas fa-pencil-alt fa-fw\" style=\"color:rgb(219,58,33); font-size:.90em\" aria-hidden=\"true\">\u003C/i> **Best written report.** A well-written report goes a long way to demonstrate impact and to help us reproduce the problem.\n{: #id-pencil}\n\n\u003Ci class=\"far fa-lightbulb fa-fw\" style=\"color:rgb(252,161,33); font-size:.90em\" aria-hidden=\"true\">\u003C/i> **Most innovative report.** Sometimes reporters demonstrate great out-of-the-box thinking. For example, some reports group several low-severity findings into a high-impact vulnerability. We appreciate this creativity.\n{: #id-lightbulb}\n\n\u003Ci class=\"fas fa-rocket fa-fw\" style=\"color:rgb(252,109,38); font-size:.90em\" aria-hidden=\"true\">\u003C/i> **Most impactful finding.** At the end of the day, an impactful discovery is what we all strive for.\n{: #id-rocket}\n\n**The winners will be announced on December 12 via [GitLab blog](/blog/) post.** A contributor can win at most one category. 
Of course, regular bounties still apply to any of your findings. *Here’s a hint on a little something extra that the winners will get:*\n\n{::options parse_block_html=\"true\" /}\n**What’s orange and purple and goes hackety, hack?**\n{: .text-center}\n\nHappy hacking!\n\nPhoto by [Max DeRoin](https://www.pexels.com/@maxderoin?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels) on [Pexels](https://www.pexels.com/photo/close-up-of-computer-keyboard-249203/?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels)\n{: .note}\n",[267,720,9,3832],{"slug":5639,"featured":6,"template":680},"reducing-time-to-payout-and-launching-a-bug-bounty-anniversary-contest","content:en-us:blog:reducing-time-to-payout-and-launching-a-bug-bounty-anniversary-contest.yml","Reducing Time To Payout And Launching A Bug Bounty Anniversary Contest","en-us/blog/reducing-time-to-payout-and-launching-a-bug-bounty-anniversary-contest.yml","en-us/blog/reducing-time-to-payout-and-launching-a-bug-bounty-anniversary-contest",{"_path":5645,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5646,"content":5652,"config":5657,"_id":5659,"_type":14,"title":5660,"_source":16,"_file":5661,"_stem":5662,"_extension":19},"/en-us/blog/remote-enables-innovation",{"title":5647,"description":5648,"ogTitle":5647,"ogDescription":5648,"noIndex":6,"ogImage":5649,"ogUrl":5650,"ogSiteName":667,"ogType":668,"canonicalUrls":5650,"schema":5651},"How remote work enables rapid innovation at GitLab","At GitLab, remote isn’t a business operations risk, it’s a competitive advantage.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678666/Blog/Hero%20Images/paper-lanterns.jpg","https://about.gitlab.com/blog/remote-enables-innovation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How remote work enables rapid innovation at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor 
Wu\"}],\n        \"datePublished\": \"2019-02-27\"\n      }",{"title":5647,"description":5648,"authors":5653,"heroImage":5649,"date":5654,"body":5655,"category":808,"tags":5656},[2035],"2019-02-27","\nI’m a Product Manager here at GitLab, primarily contributing to the [Plan stage](/direction/plan/)\nof the [DevOps lifecycle](/stages-devops-lifecycle/). I joined in November 2016 and I’ve witnessed incredible\ngrowth in GitLab the product as well as GitLab the team. Many\nnew hires have asked me during [coffee chats](/company/culture/all-remote/#coffee-chats)\nabout GitLab culture and remote work in particular, since we're an [all-remote](/company/culture/all-remote/)\ncompany. My view has evolved over this time and I wanted to share specifically why I think\nremote is _not_ a challenge to overcome, but actually a _competitive advantage_, at least for GitLab.\n\n## A remote journey\n\nWhen I joined GitLab, I thought remote was a challenge to overcome or at least\nto manage. It was a risk to be mitigated. For example, I really wanted daily standup\nmeetings with the engineering team I was working with. Silicon Valley-style tech\ncompanies and product management books tell us that frequent, synchronous, face-to-face\ncommunication is necessary for building successful products efficiently and to win\nin the marketplace. To my dismay at the time, we never had in-sync standups (and\nmy team today still doesn’t have them). But curiously, we nonetheless had immense\ncollaboration and continued to ship product at a high velocity. Something really\nweird and unexpected was going on.\n\nLater on, as I started getting comfortable [doing product the GitLab way](/handbook/product/),\nI started to think that remote wasn’t really a risk, but that there were just a\nfew negatives, and that the overall effect was net positive. 
See the [advantages and disadvantages of remote](/company/culture/all-remote/#advantages-for-employees).\n\nToday, I realize that even a positive-negative accounting of remote is insufficient\nto articulate what remote means at GitLab. I think that remote\n(along with a few other key crucial GitLab ingredients) gives us a differentiated\nand competitive advantage, in particular allowing us to innovate at a rapid pace\nthat is truly unique. Here's why:\n\n## Interdependent ingredients\n\nThere are several crucial and interdependent GitLab ingredients that make remote\ntruly work in our favor:\n\n### Async communication\n\nRemote implies geographic diversity (since we hire all over the world),\nand because most folks work during the day, that further implies time zone diversity.\nConsequently, we prefer **[Async communication (primarily with text)](/handbook/communication/)** as we scale our organization in\nspace-time. Async demands everything be written down and that it be clear and concise.\nYou can’t afford a prolonged back-and-forth conversation because every round-trip\ntransaction is possibly 24 hours in the worst case. In particular, we prefer text\nbecause the internet and modern apps (for example [GitLab issues](https://docs.gitlab.com/ee/user/project/issues/)) have allowed text\nto be easily organizable, searchable, and even hyperlinked. Text is easy to parse\nand thus consume. It is a highly efficient form of communication, especially for\ntransactional collaboration.\n\n### Transparency\n\nThe async communication we reference is also digital, making it infinitely\nscalable. Unlike the printed page in a physical office, anybody should\nbe able to access a digital message. 
So, rather than re-erecting the walls and silos\nthat plague traditional organizations and inevitably block collaboration, we\nmake communications and work **[transparent](https://handbook.gitlab.com/handbook/values/#transparency)** by default.\nAdding a layer of permissions is necessary sometimes, and in those cases it becomes an overhead cost to manage\nand use (for example fixing a security bug.) The transmitter of communications\nneeds to figure out who should receive, and set the appropriate permissions. The\nreceiver themself needs additional work to access the content. It’s more pain. It\nadds up. So we try to avoid it when we can.\n\n>Because you know everything you write down will potentially be viewed by anyone – inside or even outside the company – simply telling the truth is the optimal and most efficient strategy\n\nTransparency also makes it really easy to tell the truth, and disincentivizes dishonesty.\nTelling the truth is simply the right thing to do, but it’s also a great strategy\nto grow a long-term sustainable business. In particular, because you know everything\nyou write down will potentially be viewed by anyone in the company or even outside\nthe company, simply telling the truth is the optimal and most efficient strategy\nand you will thus adopt it with little friction. You don’t have to make up slightly\ndifferent versions for different stakeholders. You don’t have to keep track of all\nthese versions. And you only need a single artifact to document that one source\nof truth, which will never be out of sync, because there’s only one! For\nus, that single source of truth is typically the description in an issue.\n\n### Everyone can contribute\n\nWith a single source of truth that is consumable by anybody, it allows **[everyone to contribute](/company/mission/#mission)**.\nEveryone has information parity. And so anyone is welcome to contribute. 
In fact,\nremember I mentioned above that the transmitter of information typically has an intended receiver\nin mind? In this case, oftentimes somebody who they didn’t expect can even participate\nand add value. This isn’t possible if there’s no transparency because artificial\nbarriers pre-close the opportunities of potential collaboration. Also, everyone\ncan contribute means future folks can participate too. You may start a conversation\non an idea that turns out to be suboptimal in the current circumstances. But it\nmight end up being just a timing issue. And so posterity might be able to recover\nthe old idea and ship a feature later on, taking advantage of all the discussions\nthat were had and made available publicly.\n\nEveryone can contribute also means that the diversity of ideas skyrockets. And so\nat GitLab, people often cross departments and offer some of the best ideas to solve\nbig challenging problems. But we still have [directly responsible individuals](/handbook/people-group/directly-responsible-individuals/)\nto make decisions in order to avoid analysis paralysis.\n\n### Iteration\n\nFinally, how can all this communication and collaboration truly function if the\nmechanisms are so transactional, distributed, and unstructured? It works because\nit forces us to be **[iterative](https://handbook.gitlab.com/handbook/values/#iteration)**. Most people think they understand iteration (myself\nincluded) before joining GitLab. But I’ve discovered over and over again that new\nfolks are surprised that this concept is taken to an extreme. Product\nand code are shipped in the absolute smallest piece possible in an effort to get\nfeedback and momentum. Implementing programs and processes at GitLab means breaking\noff the smallest chunk and then putting it into action right away. We still make\nbig, bold plans and big bets on the future. But we don’t obsess over extended analysis.\nInstead we find the smallest thing that we can do now and we do it. 
We believe that\nwaiting until tomorrow is an opportunity cost. Doing something small today is low\nrisk and results in immediate feedback. We have a [bias for action](https://handbook.gitlab.com/handbook/values/#bias-for-action).\n\n>We believe that waiting until tomorrow is an opportunity cost. Doing something small today is low\nrisk and results in immediate feedback.\n\nAnd so if all our communication and collaboration is focused on small iterations,\nthe scope of a typical  problem is small and manageable. And it turns out (unsurprisingly)\nmore people are willing to participate in a small problem if it literally takes\nthem a few moments to voluntarily glance at an issue description, instead of being\nforced to attend a two-hour slide presentation explaining a big problem.\nAnd since the problem is made transparent by default, the pool of contributors is\nvery high, as mentioned earlier. Personally, I am actively involved\nin at least 20 to 30 parallel problem conversations on a daily basis. It is impossible\nfor anyone to achieve that level of productivity if all to those conversations required\ndedicated, ongoing, synchronous meetings. This results in an incredible rate of collaboration\nfor myself. Multiply that by all team members at GitLab, and then also all GitLab\ncommunity members further still, and you can see now why GitLab’s pace of innovation\nis ridiculously high.\n\nRemote is not a challenge for GitLab to overcome. It’s a clear business advantage.\n\n## Ending caveat\n\nThe picture I’ve painted here is one of constant messaging and wild ideas. And\nthat’s intentional because it’s true. New folks joining GitLab often are inundated\nby the number of discussions they find themselves involved in after several weeks\nin. This is indeed an ongoing risk for GitLab especially as we scale and the level\nof ideation grows exponentially in relation to headcount (since communication links\ngrow exponentially as nodes in a people network grow). 
I’ve observed that GitLab\nteam members usually figure out a way to cope soon enough, and typically become\nmore selective in their communications over time. I think this is a good general\nstrategy overall, because good ideas tend to get more attention, and we essentially\nrely on the wisdom of the crowds to surface them. Of course we still have well-defined\nroles and responsibilities that serve as guardrails too, that allow subject matter\nexperts and directly responsible individuals to strategically guide our innovation\nin the right general direction.\n\nHow are you making remote work work? Let us know in the comments or tweet us [@gitlab](https://twitter.com/gitlab).\n\n[Cover image](https://unsplash.com/photos/TaXPogWdzR0) by [amseaman](https://unsplash.com/@amseaman) on Unsplash\n{: .note}\n",[811,9,832,873,723],{"slug":5658,"featured":6,"template":680},"remote-enables-innovation","content:en-us:blog:remote-enables-innovation.yml","Remote Enables Innovation","en-us/blog/remote-enables-innovation.yml","en-us/blog/remote-enables-innovation",{"_path":5664,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5665,"content":5671,"config":5676,"_id":5678,"_type":14,"title":5679,"_source":16,"_file":5680,"_stem":5681,"_extension":19},"/en-us/blog/remote-kids-part-four",{"title":5666,"description":5667,"ogTitle":5666,"ogDescription":5667,"noIndex":6,"ogImage":5668,"ogUrl":5669,"ogSiteName":667,"ogType":668,"canonicalUrls":5669,"schema":5670},"5 Things to keep in mind while working remotely with kids","A flex schedule, realistic expectations, and a positive attitude will make it easier to work with kids around.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680690/Blog/Hero%20Images/working-at-home-with-kids.jpg","https://about.gitlab.com/blog/remote-kids-part-four","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Things to keep in mind while working remotely with 
kids\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2019-08-08\",\n      }",{"title":5666,"description":5667,"authors":5672,"heroImage":5668,"date":5673,"body":5674,"category":808,"tags":5675},[4083],"2019-08-08","\n\n_This is the fourth and final blog post in our series on working remotely with children of all ages. In part one we looked at [maternity/paternity leave policies around the world](/blog/how-is-it-being-a-new-mom-working-for-gitlab/); in part two Jarka Košanová shared her experience [working at GitLab with a newborn](/blog/balancing-career-and-baby/); and in part three GitLab team members had good advice to [make the most of workspace shared with children](/blog/working-remotely-with-children-at-home/)._\n\nDuring [GitLab Contribute 2019](/blog/contribute-wrap-up/) in\nNew Orleans, facilitators [Lyle Kozloff][lyle] and myself, [Sean McGivern][smcgivern], hosted\nfour unconference sessions about\nworking remotely with children at home. GitLab team members had helpful and practical\nadvice on everything from flexibility to time with a partner.\n\n## 1. Embrace a flexible schedule\n\n> My son started playschool (recently) and it's only two hours. I don't go home\nbecause it's a waste of time so I work from there – no coding, no\ndeep work, just going through mentions and stuff. – [_Heinrich Lee Yu, backend engineer_][engwan]\n\n> My daughter has always been a great sleeper, so my husband\nand I wake up around 5:00 each morning (he also works remotely)\nto get a head start on work. We are usually able to get a couple\nhours of work in before she even wakes up, freeing up our afternoon\nto spend time with her. – [_Annabel Dunstone Gray, product designer_][annabeldunstone]\n\nBy [working asynchronously](/handbook/communication/#introduction) we can arrange our time to match our own schedules. (This doesn't only apply to parents, of course; anyone can do this.) 
Different roles have different expectations, of course. If you work in Support you’ll need to provide timezone coverage, but even within that, there\nis a lot of scope to arrange your work schedule to match your childcare,\nrather than the other way around.\n\n## 2. Be more disciplined with that schedule\n\n> I had to get a lot more disciplined with my time. When I was young and\nsingle I could just get behind and pull an all-nighter, but I can't do\n that any more. I'm more efficient. There's a switching cost, but\n you'll be better in the long run. – [_Eric Johnson, VP of Engineering_][edjdev]\n\n> Having kids will make you develop this efficiency, I have to pick my\n son up from kindergarten at four and sometimes no one else can do that, so I need\n to schedule my work around that. - [_Grzegorz Bizon, staff backend engineer_][grzesiek]\n\nBeing flexible doesn't mean being undisciplined. With children at home,\nthere are a lot of competing demands on your time. For many people, this\nmeans that they become more efficient out of necessity. It’s hard to partly work and partly do something and then make up for it with extra hours at the keyboard, because there are no more spare hours.\n\n## 3. The role of relationships\n\n> My wife and I made an agreement that we're not going to let kids stop\nus doing sports. We play on the same teams, and we just bring our\nkids. There's normally enough people around to help keep an eye on\nthem while we're playing. It's hard when my wife's working one night,\nthough. – [_Chris Maurer – manager, Customer Success, Public Sector_][cdmaurer13]\n\n> When we had the first kid, we were doing everything as a couple:\nwhatever it was, we were together. Then, with the arrival of our\nsecond kid, we felt like we had to care for one kid each. With time,\nthe fear of ending up alone with both kids had taken root. We had to\nchange something: we simply had to let go. 
One person can care for both\nkids for the night, and the other one is free to go out and do\nwhatever they want. Turns out this actually totally removed the fear\nof being alone. We both let each other go out to do something social to\nreinvigorate a bit. We even started bouldering, but we never go on\nthe same night. – [_Micaël Bergeron, backend engineer_][mbergeron]\n\nIt's important to keep doing things you enjoy when you have children. It\nsets a good model for your children, and will make you happier which\nwill help you be a better parent.\n\n## 4. Set expectations\n\n> It took us an entire child to realise that while co-suffering feels\nlike the right thing to do, it's less efficient – you both end up tired\nand exhausted. – [_Lyle Kozloff, Support engineering manager_][lyle]\n\n> Don't keep count of the things that you and your partner are doing,\njust do everything you can. I did the majority of the raising the\nbabies, but my husband would take night things. – [_Karlia Kue,\nBusiness Systems Analyst_][kxkue]\n\nThis relates to every other point here. The worst thing that can happen\nis that people get resentful or stressed, and that is more likely to\nhappen when it's not clear whose responsibility it is.\n\nOn a personal note, and although it sounds a little goofy: The concept\nof [directly responsible individuals](/handbook/people-group/directly-responsible-individuals/) we use at GitLab also helped my partner and I manage the way we think about who's responsible for our\nson at any point.\n\n## 5. Enjoy it\n\n> My daughter is my best friend, and I am so blessed to be able to see her\ngrow into her own little person while still accomplishing my professional goals.\nSeeing her interact (\"Hi!\" for everyone) with all of my GitLab teammates at\nContribute was also very special. – [_Brittany Rohde, Compensation & Benefits Manager_][brittanyr]\n\nI really appreciate the amount of time I can spend with my son. I see\nhim for several hours every single day. 
Coming to New Orleans for\nContribute was hard!\n\nHaving a child has been the best part of my life so far. A big part of\nthat was having a job that meant I could spend a good amount of time\nwith him every day without feeling like I was doing something wrong or\nnot being productive.\n\n## Remote work makes it easier\n\nWorking remotely doesn't change the fact that being a parent is\nchallenging, but it does help provide time and space to navigate those\nchallenges.\n\nWhat tips have you stumbled across while working remotely with kids at\nhome? Let us know in the comments or tweet us [@gitlab](https://twitter.com/gitlab).\n\nPhoto by [Baby Natur](https://unsplash.com/@babynatur?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/kids-toys?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n[annabeldunstone]: /company/team/#annabeldunstone\n[brittanyr]: /company/team/#brittanyr\n[cdmaurer13]: /company/team/#mauichief\n[edjdev]: /company/team/#edjdev\n[engwan]: /company/team/#engwan\n[grzesiek]: /company/team/#GrzegorzBizon\n[kxkue]: /company/team/#karliakue\n[lyle]: /company/team/#lkozloff\n[mbergeron]: /company/team/#micaelbergeron\n[smcgivern]: /company/team/#mcgivernsa\n",[832,810,9],{"slug":5677,"featured":6,"template":680},"remote-kids-part-four","content:en-us:blog:remote-kids-part-four.yml","Remote Kids Part Four","en-us/blog/remote-kids-part-four.yml","en-us/blog/remote-kids-part-four",{"_path":5683,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5684,"content":5690,"config":5696,"_id":5698,"_type":14,"title":5699,"_source":16,"_file":5700,"_stem":5701,"_extension":19},"/en-us/blog/remote-work-done-right",{"title":5685,"description":5686,"ogTitle":5685,"ogDescription":5686,"noIndex":6,"ogImage":5687,"ogUrl":5688,"ogSiteName":667,"ogType":668,"canonicalUrls":5688,"schema":5689},"Remote work, done right","Guest author Nolan Myers hated conference calls. 
Here's how we changed his mind.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679812/Blog/Hero%20Images/remote-work-done-right.jpg","https://about.gitlab.com/blog/remote-work-done-right","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Remote work, done right\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nolan Myers\"}],\n        \"datePublished\": \"2018-03-16\",\n      }",{"title":5685,"description":5686,"authors":5691,"heroImage":5687,"date":5693,"body":5694,"category":808,"tags":5695},[5692],"Nolan Myers","2018-03-16","\n\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your brain](/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or discussion of other things related to GitLab._\n\nI’ve been on many terrible conference calls. The gentle voice telling me to enter my nine-digit pin, followed by the pound sign, feels like disappointment before the call even begins. That’s why I was so surprised to hear that GitLab – a company of over 200 people – runs without an office. How could anything get done when every meeting was remote?\n\n\u003C!-- more -->\n\nSeeing is believing, so I jumped at the opportunity to watch firsthand. What I learned convinced me that remote meetings can be just as good as in person, and maybe even better. Here’s what impressed me:\n\n### Video conference for all\n\nEveryone joined a Zoom call, each from their own computer. Most everyone had their cameras on, which gave enough visual cues to see their mood; sometimes even an understanding of who they are, like seeing a pool table or disassembled motorcycle behind them. The video format helped enforce some good meeting practices. Only one speaker at a time; a singular focus of attention, either a person or a shared screen. 
Meetings started on time, never having to wait for a previous group to clear a conference room. Having everyone join independently also worked much better than having a few people in a room and a few remotes, which inevitably creates a power-center in the room.\n\n>The video format helped enforce some good meeting practices: only one speaker at a time; a singular focus of attention\n\n### Create a live agenda in a shared document\n\nEach meeting started with an agenda in a shared Google Doc. They coupled this with a “write before you speak” etiquette. Anyone was welcome to speak, and added a brief summary of their question or comment into the shared doc before chiming in. This encouraged the speaker to be deliberate about their point, think about where in the flow it made most sense, and to know they’d get the floor when appropriate. It was kind of a marvel to see bullets and sub-bullets evolve during the meeting. A task owner typed “TODO: follow up” right as they said “I got it.” Even better, they were left with detailed meeting notes for posterity.\n\n>It was kind of a marvel to see bullets and sub-bullets evolve during the meeting. A task owner typed “TODO: follow up” right as they said “I got it.”\n\n### Embrace multitasking\n\nHow often have you heard that you should give a meeting your undivided attention? And how often have you actually believed it? GitLab embraces multitasking. Having everyone together ensures the right people are there for important conversations. But inevitably a packed meeting agenda will have sections more and less relevant to a variety of participants. Unlike in a room, a video call where someone tunes out for a bit doesn’t hamper the effectiveness of those focused on a conversation. The shared agenda let everyone know when they were needed, and each topic had the right people ready to contribute.\n\n### Caveats and considerations\n\nThis process felt like a miniature miracle to watch, but does need the right tools. 
GitLab relied on Zoom and it worked well. One external call used WebEx, and its longer latency led people accidentally to talk over one another. Google Docs was a must for the shared agenda. Everyone had set up a reasonable workspace with fast internet and a camera.\n\nI’d also add that I saw this work well for both update- and decision-oriented meetings. Would this approach support technical brainstorming meetings too? Sometimes drawing on a whiteboard works much better than typing, especially if you have a diagram. Zoom does have a whiteboard feature; perhaps with a Stylus you could do this as well as in person. I’m curious to see it in practice.\n\nWhen I first heard of GitLab’s remote-only hiring, I immediately saw the benefits of hiring in lower-rent locations and not paying for office space. I assumed that it cost some productivity through effective collaboration. Now I see video calls done right can beat all but the best traditional conference room meetings.\n\n## About the guest author\n\nNolan Myers advises startups on organizational development and customer success, leveraging his executive experience in building high-performing products and teams. He also has passions for classical music, fine cuisine, and urban design. 
Learn more on his [LinkedIn](https://linkedin.com/in/nolanmyers).\n\nPhoto by [Christin Hume](https://unsplash.com/photos/slbqShqAhEo) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[832,9,723,811,745],{"slug":5697,"featured":6,"template":680},"remote-work-done-right","content:en-us:blog:remote-work-done-right.yml","Remote Work Done Right","en-us/blog/remote-work-done-right.yml","en-us/blog/remote-work-done-right",{"_path":5703,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5704,"content":5710,"config":5715,"_id":5717,"_type":14,"title":5718,"_source":16,"_file":5719,"_stem":5720,"_extension":19},"/en-us/blog/resources-for-companies-embracing-remote-work",{"title":5705,"description":5706,"ogTitle":5705,"ogDescription":5706,"noIndex":6,"ogImage":5707,"ogUrl":5708,"ogSiteName":667,"ogType":668,"canonicalUrls":5708,"schema":5709},"Resources for companies embracing remote work","We're sharing our comprehensive guide to remote work with companies who are now embracing a remote environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679651/Blog/Hero%20Images/gitlab-all-remote-cover-2560x1440.jpg","https://about.gitlab.com/blog/resources-for-companies-embracing-remote-work","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Resources for companies embracing remote work\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Murph\"}],\n        \"datePublished\": \"2020-03-06\",\n      }",{"title":5705,"description":5706,"authors":5711,"heroImage":5707,"date":5712,"body":5713,"category":675,"tags":5714},[890],"2020-03-06","\n\nDue to global issues concerning [COVID-19 (Coronavirus)](https://www.cdc.gov/coronavirus/2019-ncov/index.html), there has been a notable shift in appetite for working remotely. 
Companies previously against remote work are suddenly considering remote or implementing remote, with varying degrees of intentionality.\n\nIn the coming weeks and months, your company may have short- or medium-term needs to establish a work-from-home protocol, even if you’re [unsure about long-term commitment to remote](/company/culture/all-remote/hybrid-remote/), and how it will impact your business.\n\n## Resources for going remote\n\n![GitLab global team map graphic](https://about.gitlab.com/images/blogimages/gitlab-all-remote-laptop-global-map.jpg){: .shadow.medium.center}\n\nGiven these realities, companies are being tasked with advising their workforce on how to work from home. With no warning, this is a tall task. \n\nThankfully, these companies do not have to start from scratch. As the world's largest all-remote company, GitLab has learned a lot in scaling from a few people scattered across Europe to a 1,200+ person team in over 65 countries and regions. \n\nWe've built a [**remote work emergency toolkit**](/company/culture/all-remote/remote-work-emergency-plan/) for leaders and managers, and a [**remote work starter guide**](/company/culture/all-remote/remote-work-starter-guide/) for employees. 
This is a fast boot guide with five things you can focus on right now to maximize stability.\n\nAdditionally, we’ve architected [dozens of comprehensive guides](/company/culture/all-remote/guide/) to implementing remote, covering topics such as:\n\n* [Pitfalls to watch out for when embracing remote](/company/culture/all-remote/what-not-to-do/)\n* [Embracing asynchronous communication](/company/culture/all-remote/asynchronous/)\n* [Transitioning a company to remote](/company/culture/all-remote/transition/)\n* [How to use forcing functions to work remote-first](/company/culture/all-remote/how-to-work-remote-first/)\n* [Combating burnout, isolation, and anxiety](/company/culture/all-remote/mental-health/)\n* [Understanding the phases of remote adaptation](/company/culture/all-remote/phases-of-remote-adaptation/)\n* [Remote onboarding](/company/culture/all-remote/onboarding/)\n* [Meetings](/company/culture/all-remote/meetings/)\n* [Management](/company/culture/all-remote/management/)\n* [Asynchronous workflows](/company/culture/all-remote/asynchronous/)\n* [Handbook-first documentation](/company/culture/all-remote/handbook-first-documentation/)\n* [Adopting a self-service mindset](/company/culture/all-remote/self-service/)\n* [Learning and development](/company/culture/all-remote/learning-and-development/)\n* [Workspaces](/company/culture/all-remote/workspace/)\n* [Informal communication](/company/culture/all-remote/informal-communication/)\n* [Scaling](/company/culture/all-remote/scaling/)\n* [Getting started in a remote role](/company/culture/all-remote/getting-started/)\n* [Communicating effectively and responsibly through text](/company/culture/all-remote/effective-communication/)\n\nAll of these guides are open, and we encourage other companies to study them, copy them, implement them, and even contribute learnings back to them. It’s an end-to-end toolkit on getting started with remote, iterating as a team, and thriving in an officeless environment. 
\n\n## Remote work benefits your customers\n\nAs a recent [Economist article](https://www.economist.com/business/2020/03/05/covid-19-is-foisting-changes-on-business-that-could-be-beneficial) points out, COVID-19 is causing companies to rethink their business from a supply chain perspective. Having a remote workforce can lessen the disruption of the supply chain if the product is not a tangible good or service. At GitLab, utilizing a SaaS model and being an all-remote company has provided resiliency to these issues. Our supply chain is not affected by the global impact of COVID-19; however, onsite services may be limited in affected areas. \n\nGitLab, the open source product, and other tools like Zoom and Slack help teams collaborate through asynchronous workflows which not only enable remote work, but may be helpful in times of global crises.\n\n## The importance of in-person interactions\n\nAs an all-remote company, in-person interactions don’t occur by default. In turn, GitLab is [intentional](/company/culture/all-remote/in-person/) about ensuring that team members are given opportunities to meet other team members in a shared physical space. Each year, GitLab offers every team member the opportunity to gather in a new city for a week-long unconference. Whereas most summits are focused on networking and productivity, [GitLab Contribute](/events/gitlab-contribute/) is unique. Our team bonds over video calls year-round, so this week of in-person excursions is one that many mark on their calendar as can’t-miss. When your default is virtual, the chance to explore a new place with colleagues is invaluable. \n\nWith the increased impact of COVID-19 across the world, we have made the difficult decision to cancel the planned March 2020 edition of Contribute. The health and safety of our team members and the community in the city we visit is our highest priority. We are disappointed, but believe this to be the best decision for everyone involved. 
\n\nBeyond Contribute, we are monitoring the situation carefully and providing team members with [CDC recommendations](https://www.cdc.gov/coronavirus/2019-ncov/about/prevention-treatment.html) to help avoid sharing contagious viruses or illnesses when traveling or meeting with other team members. Our global, [all-remote structure](/company/culture/all-remote/guide/) allows us to continue work as usual with video calls, chat, and other collaboration tools and services to avoid unnecessary travel. \n\n## Everyone can contribute\n\nSharing our learnings with the world is at the heart of our mission: [everyone can contribute](https://handbook.gitlab.com/handbook/values/#mission). GitLab believes that all-remote is the [future of work](/company/culture/all-remote/vision/), and remote companies have a shared responsibility to show the way for other organizations who are embracing it. If you or your company has an experience that would benefit the greater world, consider creating a [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/) and adding a contribution to the all-remote handbook.\n",[9,832,675],{"slug":5716,"featured":6,"template":680},"resources-for-companies-embracing-remote-work","content:en-us:blog:resources-for-companies-embracing-remote-work.yml","Resources For Companies Embracing Remote Work","en-us/blog/resources-for-companies-embracing-remote-work.yml","en-us/blog/resources-for-companies-embracing-remote-work",{"_path":5722,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5723,"content":5729,"config":5735,"_id":5737,"_type":14,"title":5738,"_source":16,"_file":5739,"_stem":5740,"_extension":19},"/en-us/blog/scaling-our-use-of-sidekiq",{"title":5724,"description":5725,"ogTitle":5724,"ogDescription":5725,"noIndex":6,"ogImage":5726,"ogUrl":5727,"ogSiteName":667,"ogType":668,"canonicalUrls":5727,"schema":5728},"How we scaled async workload processing at GitLab.com using Sidekiq","Sidekiq was a great tool for async processing until it 
couldn't keep up. Here's how we made it scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667068/Blog/Hero%20Images/sidekiqmountain.jpg","https://about.gitlab.com/blog/scaling-our-use-of-sidekiq","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we scaled async workload processing at GitLab.com using Sidekiq\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rachel Nienaber\"}],\n        \"datePublished\": \"2020-06-24\",\n      }",{"title":5724,"description":5725,"authors":5730,"heroImage":5726,"date":5732,"body":5733,"category":743,"tags":5734},[5731],"Rachel Nienaber","2020-06-24","## Sidekiq at GitLab\n\nGitLab is a Ruby-on-Rails application that processes a lot of data. Much of this processing can be done asynchronously,\nand one of the solutions we use to accomplish this is [Sidekiq](https://github.com/mperham/sidekiq/wiki) which is a background-processing\nframework for Ruby. It handles jobs that are better processed asynchronously outside the web request/response cycle.\n\nThere are a few terms that that we'll use in this post:\n\n* A **worker class** is a class defined in our application to process a task in Sidekiq.\n* A **job** is an instance of a worker class, so each job represents a single task.\n* A **queue** is a collection of jobs (potentially for different worker classes) that are waiting to be processed.\n* A **worker thread** is a thread processing jobs in particular queues. Each Sidekiq process can have multiple worker threads.\n\nThen there are two terms specific to GitLab.com:\n\n* A **Sidekiq role** is a configuration for a particular group of queues. For instance, we might have a `push_actions` role that is for processing the `post_receive` and `process_commit` queues.\n* A **Sidekiq node** is an instance of the GitLab application for a Sidekiq role. 
A Sidekiq node can have multiple Sidekiq processes.\n\nBack in 2013, in version 6.3 of GitLab, every Sidekiq worker class had its own queue. We weren't strict in monitoring the creation of\nnew worker classes. There was no strategic plan for assigning queues to where they would execute.\n\nIn 2016, we tried to introduce order again, and rearranged the queues to be based on features. We followed this with a change in\n2017 to have a dedicated queue for each worker class again, and we were able to monitor queues more accurately and impose specific\nthrottles and limits to each. It was easy to quickly make decisions about the queues as they were running because of how\nthe work was distributed. The queues were grouped, and the names of these groups were `realtime`, `asap`, and `besteffort` for example.\n\nAt the time, we knew that this was not the approach recommended by the author of Sidekiq, Mike Perham, but we felt that we knew what\nthe trade-offs were. In fact, Mike wrote: \n\n> “I don't recommend having more than a handful of queues. Lots of queues makes for a more complex\n> system [and Sidekiq Pro cannot reliably](https://github.com/antirez/redis/issues/1785) handle multiple queues without\n> polling. 
M Sidekiq Pro processes polling N queues means O(M*N) operations per second slamming Redis.”\n\nFrom [https://github.com/mperham/sidekiq/wiki/Advanced-Options#queues](https://github.com/mperham/sidekiq/wiki/Advanced-Options#queues)\n\nThis served us well for nearly two years before this approach no longer matched our scaling needs.\n\n### Pressure from availability issues\n\nIn mid-2019 GitLab.com experienced several different major incidents related to the way we\nprocess background queues.\n\nExamples of these incidents:\n- [Gitaly n+1 calls caused bad latency and resulted in the Sidekiq queues growing](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7479).\nThis was due to the way we processed tags in Gitaly.\n- A user generated many notes on a single commit which [slowed down the new_note Sidekiq queue](https://gitlab.com/gitlab-com/gl-infra/production/issues/1028)\nand led to a delay of sending out notifications.\n- CI jobs took very long to complete because [jobs in the pipeline_processing:pipeline_process Sidekiq queue piled up](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/7402).\n2 pipelines caused a high amount of Sidekiq jobs, Sidekiq pipeline nodes were maxing out their CPU, pipeline_processing\njobs were causing many SQL calls and the pgbouncer pool for Sidekiq was becoming saturated.\n\nAll of these were showing that we needed to take action.\n\n![Sidekiq throughput per job](https://about.gitlab.com/images/blogimages/sidekiq_throughput_per_job.png){: .shadow}\n\nThis image shows how many jobs we process per second over a 24 hour period. 
This shows the variety of jobs and\ngives an idea of the scale of jobs in relation to each other.\n\n### Improvements\n\n#### Changing the relationship between jobs and Sidekiq roles\n\nIn [infrastructure#7219 (closed)](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7219) we significantly\naltered our approach for how jobs were related to Sidekiq roles.\n\nWe started from a position where:\n1. We had a many-to-many relationship between Sidekiq jobs and Sidekiq roles.\n   1. For example, most pipeline jobs ran on the `besteffort` nodes, but some ran on the pipeline nodes.\n   1. Some jobs ran on up to three types of node: eg `realtime`, `asap` and `besteffort` priorities.\n1. Worker threads were reserved for single queues.\n   1. For example, one eighth of the `realtime` queue might be reserved for new_note jobs. In the event of a glut of\n  new_note jobs, most of the fleet would sit idle while one worker thread would be saturated. Worse, adding more nodes would\n  only increase processing power by 1/8th of a node, not the full compute capacity of the new node.\n1. Urgent and non-urgent jobs would be in the same queue.\n   1. For example, some jobs in the `realtime` queue would take up to 10 minutes to process.\n   1. This is a bit like allowing overloaded trolleys in the 10 items-or-less lane.\n\nOnce the issue was completed, we now had:\n1. A one-to-one relationship between Sidekiq jobs and Sidekiq roles\n   1. Each job will execute on exactly one Sidekiq role\n1. All worker threads will run all jobs, and each Sidekiq node will have the same number of worker threads\n   1. When a glut of jobs comes in, 100% of compute on a node can be dedicated to executing the jobs\n1. Slow jobs and fast jobs are kept apart\n   1. The 10 items or less lane is now being enforced.\n\nWhile this was a significant improvement, it introduced some technical debt. 
We fixed everything for a moment in time,\nknowing that as soon as the application changed this would be out of date, and as time went on, would only get more out\nof date until we were back in the same position. To try and mitigate this in future, we started to look at classifying\nthe workloads and using queue selectors.\n\n#### Queue Selectors Deployed in Sidekiq Cluster\n\nIn the\n[Background Processing Improvements Epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/96), we looked at ways\nthat we could simplify the structure so that background processing could be in a position to scale to 100x the traffic\nat the time. We also needed the processing to be unsurprising. Operators (and developers) should understand where a job\nwill run, why it is queueing up and how to reduce queues. We decided to move to using [queue selectors](https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html)\nto help us to keep the queue definitions correct. (This approach is still experimental).\n\nIn addition, the infrastructure team should not reactively (and manually) route Sidekiq jobs to priority fleets, as\nwas the situation previously. Developers should have the ability to specify the requirements of their workloads and\nhave these automatically processed on a queue designed to support that type of work.\n\nSidekiq processes can be configured to select specific queues for processing. Instead of making this selection by name,\nwe wanted to make the selection on how the workload for that queue was classified.\n\nWe came up with an approach for classifying background jobs by their workload and building a sustainable way of grouping\nsimilar workloads together.\n\nWhen a new job is created, developers need to do this to classify the workload. This is done through\n- Specifying the [urgency of the job](https://docs.gitlab.com/ee/development/sidekiq/index.html). The options\nare `high`, `low` and `none`. 
If the delay of a job would have user impact, then the job is `high` urgency.\n- Noting if the [job has external dependencies](https://docs.gitlab.com/ee/development/sidekiq/index.html)\nthat could impact their availability. (For example, if they communicate with user-specified Kubernetes clusters).\n- Adding an [annotation declaring if the worker class will be cpu-bound or memory-bound](https://docs.gitlab.com/ee/development/sidekiq/index.html). Knowing\nthis allows us to make decisions around how much thread concurrency a Ruby process can tolerate, or targeting memory-bound\njobs to low-concurrency, high-memory nodes.\n\nThere is additional guidance available to [determine if the worker class should be marked as cpu-bound](https://docs.gitlab.com/ee/development/sidekiq/index.html).\n\n#### SLAs are based on these attributes\n\n1. High urgency jobs should not queue for more than 10 seconds.\n1. High urgency jobs should not take more than 10 seconds to execute (this SLA is the responsibility of the owning team to ensure that high throughput is maintained).\n1. Low urgency jobs should not queue for more than 1 minute.\n1. Jobs without urgency have no queue SLA.\n1. 
Non-high urgency jobs should not take more than 5 minutes to execute.\n\nIn each case, the queuing SLAs are the responsibility of the infrastructure team, as they need to ensure that the fleet is\ncorrectly provisioned to meet the SLA.\n\nThe execution latency SLAs are the responsibility of the development team owning the worker class, as they need to ensure that the\nworker class is sufficiently performant to ensure throughput.\n\n![Sidekiq certain queues spike](https://about.gitlab.com/images/blogimages/sidekiq_authorized_projects.png){: .shadow}\n\nThis image shows the challenges we faced by having jobs of different urgency running on the same queue.\nThe purple lines show spikes from one particular worker, where many jobs were added to the queue,\ncausing delays to other jobs which were often of equal or higher importance.\n\n### Challenge during rollout - BRPOP\n\nAs the number of background queues in the GitLab application grows, this approach continues to burden our Sidekiq Redis\nservers. On GitLab.com, our `catchall` Sidekiq nodes monitor about 200 queues, and the Redis [BRPOP](https://redis.io/commands/brpop)\ncommands used to monitor the queues consume a significant amount of time (by Redis latency standards).\n\nThe number of clients listening made this problem worse. For `besteffort` we had 7 nodes, each running 8 processes,\nwith 15 threads watching those queues - meaning 840 clients.\n\nThe command causing the problem was BRPOP. The time taken to perform this command also relates\nto the number of listeners on those keys. The addition of multiple keys increases contention in the system which causes\nlots of connections to block. And when the key list is longer the problem gets worse. The keylist represents the number of\nqueues, the more queues we have, the more keys we are listening to. 
We saw this problem on the nodes that process the most queues.\n\nWe raised an issue in the Redis issue tracker about the [performance we observed when many clients performed BRPOP on the\nsame key](https://github.com/antirez/redis/issues/7071). It was fantastic when [Salvatore](https://github.com/antirez)\nresponded within the hour and the patch was available the same day!  This fix was made in Redis 6 and backported to Redis 5.\n[Omnibus has also been upgraded to use this fix](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4126), and it will\nbe available in the major release 13.0.\n\n### Current State (as of June 2020)\n\nMigrating to these new selectors has been completed as of late April 2020.\n\nWe reduced our Sidekiq fleet from 49 nodes with 314 CPUs, to 26 nodes with 158 CPUs. This has also reduced our cost.\nThe average utilization is more evenly spread across the new fleets.\n\nAlso, we have [moved Sidekiq-cluster to Core](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181). Previously, running\nSidekiq in clustered mode (i.e. spawning more than one process) was\ntechnically only available as part of GitLab EE distributions, and for self-managed environments only in the Starter+ tiers.\nBecause of that, when booting Sidekiq up in a development env with the GDK, the least common denominator was assumed,\nwhich was to run Sidekiq in a single-process setup. That can be a problem, because it means there is a divergence between\nthe environment developers work on, and what will actually run in production (i.e. gitlab.com and higher-tier self-managed environments).\n\nIn [release 13.0](/releases/2020/06/22/gitlab-13-1-released/) Sidekiq Cluster is used by default.\n\nWe’re also better placed to migrate to Kubernetes.  
The selector approach is a lot more compatible with making good\ndecisions about things like CPU allocations + limits for Kubernetes workloads, and this will make the job of our delivery\nteam easier, leading to further cost reductions from auto-scaling deployed resources to match actual load.\n\nOur next piece of work with Sidekiq will be to [reduce the number of queues that we need to watch](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/194)\nand we will post a follow-up to this blog post when the work is completed.\n\n**Read more about infrastructure issues:**\n\n[Faster pipelines with DAG](/blog/directed-acyclic-graph/)\n\n[Keep Kubernetes runners moving](/blog/best-practices-for-kubernetes-runners/)\n\n[Understand parent-child pipelines](/blog/parent-child-pipelines/)\n\nCover image by [Jerry Zhang](https://unsplash.com/@z734923105) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,1295,723],{"slug":5736,"featured":6,"template":680},"scaling-our-use-of-sidekiq","content:en-us:blog:scaling-our-use-of-sidekiq.yml","Scaling Our Use Of Sidekiq","en-us/blog/scaling-our-use-of-sidekiq.yml","en-us/blog/scaling-our-use-of-sidekiq",{"_path":5742,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5743,"content":5749,"config":5754,"_id":5756,"_type":14,"title":5757,"_source":16,"_file":5758,"_stem":5759,"_extension":19},"/en-us/blog/scaling-the-gitlab-database",{"title":5744,"description":5745,"ogTitle":5744,"ogDescription":5745,"noIndex":6,"ogImage":5746,"ogUrl":5747,"ogSiteName":667,"ogType":668,"canonicalUrls":5747,"schema":5748},"Scaling the GitLab database","An in-depth look at the challenges faced when scaling the GitLab database and the solutions we applied to help solve the problems with our database setup.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666699/Blog/Hero%20Images/banner.jpg","https://about.gitlab.com/blog/scaling-the-gitlab-database","\n                        {\n        \"@context\": \"https://schema.org\",\n     
   \"@type\": \"Article\",\n        \"headline\": \"Scaling the GitLab database\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yorick Peterse\"}],\n        \"datePublished\": \"2017-10-02\",\n      }",{"title":5744,"description":5745,"authors":5750,"heroImage":5746,"date":5751,"body":5752,"category":743,"tags":5753},[740],"2017-10-02","\nFor a long time GitLab.com used a single PostgreSQL database server and a single\nreplica for disaster recovery purposes. This worked reasonably well for the\nfirst few years of GitLab.com's existence, but over time we began seeing more and\nmore problems with this setup. In this article we'll take a look at what we did\nto help solve these problems for both GitLab.com and self-managed GitLab\ninstances.\n\n\u003C!-- more -->\n\nFor example, the database was under constant pressure, with CPU utilization\nhovering around 70 percent almost all the time. Not because we used all\navailable resources in the best way possible, but because we were bombarding the\nserver with too many (badly optimized) queries. We realized we needed a better\nsetup that would allow us to balance the load and make GitLab.com more resilient\nto any problems that may occur on the primary database server.\n\nWhen tackling these problems using PostgreSQL there are essentially four\ntechniques you can apply:\n\n1. Optimize your application code so the queries are more efficient (and\n   ideally use fewer resources).\n2. Use a connection pooler to reduce the number of\n   database connections (and associated resources) necessary.\n3. Balance the load across multiple database servers.\n4. Shard your database.\n\nOptimizing the application code is something we have been working on actively\nfor the past two years, but it's not a final solution. Even if you improve\nperformance, when traffic also increases you may still need to apply the other\ntwo techniques. 
For the sake of this article we'll skip over this particular\nsubject and instead focus on the other techniques.\n\n## Connection pooling\n\nIn PostgreSQL a connection is handled by starting an OS process which in turn\nneeds a number of resources. The more connections (and thus processes), the more\nresources your database will use. PostgreSQL also enforces a maximum number of\nconnections as defined in the [max_connections][max-connections] setting. Once\nyou hit this limit PostgreSQL will reject new connections. Such a setup can be\nillustrated using the following diagram:\n\n{: .text-center}\n![PostgreSQL Diagram](https://about.gitlab.com/images/scaling-the-gitlab-database/postgresql.svg)\n\nHere our clients connect directly to PostgreSQL, thus requiring one connection\nper client.\n\nBy pooling connections we can have multiple client-side connections reuse\nPostgreSQL connections. For example, without pooling we'd need 100 PostgreSQL\nconnections to handle 100 client connections; with connection pooling we may\nonly need 10 or so PostgreSQL connections depending on our configuration. 
This\nmeans our connection diagram will instead look something like the following:\n\n{: .text-center}\n![Connection Pooling Diagram](https://about.gitlab.com/images/scaling-the-gitlab-database/pooler.svg)\n\nHere we show an example where four clients connect to pgbouncer but instead of\nusing four PostgreSQL connections we only need two of them.\n\nFor PostgreSQL there are two connection poolers that are most commonly used:\n\n* [pgbouncer][pgbouncer]\n* [pgpool-II][pgpool]\n\npgpool is a bit special because it does much more than just connection pooling:\nit has a built-in query caching mechanism, can balance load across multiple\ndatabases, manage replication, and more.\n\nOn the other hand pgbouncer is much simpler: all it does is connection pooling.\n\n## Database load balancing\n\nLoad balancing on the database level is typically done by making use of\nPostgreSQL's \"[hot standby][hot-standby]\" feature. A hot-standby is a PostgreSQL\nreplica that allows you to run read-only SQL queries, contrary to a regular\nstandby that does not allow any SQL queries to be executed. To balance load\nyou'd set up one or more hot-standby servers and somehow balance read-only\nqueries across these hosts while sending all other operations to the primary.\nScaling such a setup is fairly easy: simply add more hot-standby servers (if\nnecessary) as your read-only traffic increases.\n\nAnother benefit of this approach is having a more resilient database cluster.\nWeb requests that only use a secondary can continue to operate even if the\nprimary server is experiencing issues; though of course you may still run into\nerrors should those requests end up using the primary.\n\nThis approach however can be quite difficult to implement. 
For example, explicit\ntransactions must be executed on the primary since they may contain writes.\nFurthermore, after a write we want to continue using the primary for a little\nwhile because the changes may not yet be available on the hot-standby servers\nwhen using asynchronous replication.\n\n## Sharding\n\nSharding is the act of horizontally partitioning your data. This means that data\nresides on specific servers and is retrieved using a shard key. For example, you\nmay partition data per project and use the project ID as the shard key. Sharding\na database is interesting when you have a very high write load (as there's no\nother easy way of balancing writes other than perhaps a multi-master setup), or\nwhen you have _a lot_ of data and you can no longer store it in a conventional\nmanner (e.g. you simply can't fit it all on a single disk).\n\nUnfortunately the process of setting up a sharded database is a massive\nundertaking, even when using software such as [Citus][citus]. Not only do you\nneed to set up the infrastructure (which varies in complexity depending on\nwhether you run it yourself or use a hosted solution), but you also need to\nadjust large portions of your application to support sharding.\n\n### Cases against sharding\n\nOn GitLab.com the write load is typically very low, with most of the database\nqueries being read-only queries. In very exceptional cases we may spike to 1500\ntuple writes per second, but most of the time we barely make it past 200 tuple\nwrites per second. On the other hand we can easily read up to 10 million tuples\nper second on any given secondary.\n\nStorage-wise, we also don't use that much data: only about 800 GB. A large\nportion of this data is data that is being migrated in the background. Once\nthose migrations are done we expect our database to shrink in size quite a bit.\n\nThen there's the amount of work required to adjust the application so all\nqueries use the right shard keys. 
While quite a few of our queries usually\ninclude a project ID which we could use as a shard key, there are also many\nqueries where this isn't the case. Sharding would also affect the process of\ncontributing changes to GitLab as every contributor would now have to make sure\na shard key is present in their queries.\n\nFinally, there is the infrastructure that's necessary to make all of this work.\nServers have to be set up, monitoring has to be added, engineers have to be\ntrained so they are familiar with this new setup, the list goes on. While hosted\nsolutions may remove the need for managing your own servers it doesn't solve all\nproblems. Engineers still have to be trained and (most likely very expensive)\nbills have to be paid. At GitLab we also highly prefer to ship the tools we need\nso the community can make use of them. This means that if we were going to shard\nthe database we'd have to ship it (or at least parts of it) in our Omnibus\npackages. The only way you can make sure something you ship works is by running\nit yourself, meaning we wouldn't be able to use a hosted solution.\n\nUltimately we decided against sharding the database because we felt it was an\nexpensive, time-consuming, and complex solution to a problem we do not have.\n\n## Connection pooling for GitLab\n\nFor connection pooling we had two main requirements:\n\n1. It has to work well (obviously).\n2. It has to be easy to ship in our Omnibus packages so our users can also take\n   advantage of the connection pooler.\n\nReviewing the two solutions (pgpool and pgbouncer) was done in two steps:\n\n1. Perform various technical tests (does it work, how easy is it to configure,\n   etc).\n2. Find out what the experiences are of other users of the solution, what\n   problems they ran into and how they dealt with them, etc.\n\npgpool was the first solution we looked into, mostly because it seemed quite\nattractive based on all the features it offered. 
Some of the data from our tests\ncan be found in [this][pgpool-comment-data] comment.\n\nUltimately we decided against using pgpool based on a number of factors. For\nexample, pgpool does not support sticky connections. This is problematic when\nperforming a write and (trying to) display the results right away. Imagine\ncreating an issue and being redirected to the page, only to run into an HTTP 404\nerror because the server used for any read-only queries did not yet have the\ndata. One way to work around this would be to use synchronous replication, but\nthis brings many other problems to the table; problems we prefer to avoid.\n\nAnother problem is that pgpool's load balancing logic is decoupled from your\napplication and operates by parsing SQL queries and sending them to the right\nserver. Because this happens outside of your application you have very little\ncontrol over which query runs where. This may actually be beneficial to some\nbecause you don't need additional application logic, but it also prevents you\nfrom adjusting the routing logic if necessary.\n\nConfiguring pgpool also proved quite difficult due to the sheer number of\nconfiguration options. Perhaps the final nail in the coffin was the feedback we\ngot on pgpool from those having used it in the past. The feedback we received\nregarding pgpool was usually negative, though not very detailed in most cases.\nWhile most of the complaints appeared to be related to earlier versions of\npgpool it still made us doubt if using it was the right choice.\n\nThe feedback combined with the issues described above ultimately led to us\ndeciding against using pgpool and using pgbouncer instead. We performed a\nsimilar set of tests with pgbouncer and were very satisfied with it. 
It's fairly\neasy to configure (and doesn't have that much that needs configuring in the\nfirst place), relatively easy to ship, focuses only on connection pooling (and\ndoes it really well), and had very little (if any) noticeable overhead. Perhaps\nmy only complaint would be that the pgbouncer website can be a little bit hard\nto navigate.\n\nUsing pgbouncer we were able to drop the number of active PostgreSQL connections\nfrom a few hundred to only 10-20 by using transaction pooling. We opted for\nusing transaction pooling since Rails database connections are persistent. In\nsuch a setup, using session pooling would prevent us from being able to reduce\nthe number of PostgreSQL connections, thus brining few (if any) benefits. By\nusing transaction pooling we were able to drop PostgreSQL's `max_connections`\nsetting from 3000 (the reason for this particular value was never really clear)\nto 300. pgbouncer is configured in such a way that even at peak capacity we will\nonly need 200 connections; giving us some room for additional connections such\nas `psql` consoles and maintenance tasks.\n\nA side effect of using transaction pooling is that you cannot use prepared\nstatements, as the `PREPARE` and `EXECUTE` commands may end up running in\ndifferent connections; producing errors as a result. Fortunately we did not\nmeasure any increase in response timings when disabling prepared statements, but\nwe _did_ measure a reduction of roughly 20 GB in memory usage on our database\nservers.\n\nTo ensure both web requests and background jobs have connections available we\nset up two separate pools: one pool of 150 connections for background\nprocessing, and a pool of 50 connections for web requests. 
For web requests we\nrarely need more than 20 connections, but for background processing we can\neasily spike to a 100 connections simply due to the large number of background\nprocesses running on GitLab.com.\n\nToday we ship pgbouncer as part of GitLab EE's High Availability package. For\nmore information you can refer to\n[\"Omnibus GitLab PostgreSQL High Availability.\"][ha-docs]\n\n## Database load balancing for GitLab\n\nWith pgpool and its load balancing feature out of the picture we needed\nsomething else to spread load across multiple hot-standby servers.\n\nFor (but not limited to) Rails applications there is a library called\n[Makara][makara] which implements load balancing logic and includes a default\nimplementation for ActiveRecord. Makara however has some problems that were a\ndeal-breaker for us. For example, its support for sticky connections is very\nlimited: when you perform a write the connection will stick to the primary using\na cookie, with a fixed TTL. This means that if replication lag is greater than\nthe TTL you may still end up running a query on a host that doesn't have the\ndata you need.\n\nMakara also requires you to configure quite a lot, such as all the database hosts\nand their roles, with no service discovery mechanism (our current solution does\nnot yet support this either, though it's planned for the near future). Makara\nalso [does not appear to be thread-safe][makara-thread-safe], which is\nproblematic since Sidekiq (the background processing system we use) is\nmulti-threaded. Finally, we wanted to have control over the load balancing logic\nas much as possible.\n\nBesides Makara there's also [Octopus][octopus] which has some load balancing\nmechanisms built in. Octopus however is geared towards database sharding and not\njust balancing of read-only queries. 
As a result we did not consider using\nOctopus.\n\nUltimately this led to us building our own solution directly into GitLab EE.\nThe merge request adding the initial implementation can be found [here][lb-mr],\nthough some changes, improvements, and fixes were applied later on.\n\nOur solution essentially works by replacing `ActiveRecord::Base.connection` with\na proxy object that handles routing of queries. This ensures we can load balance\nas many queries as possible, even queries that don't originate directly from our\nown code. This proxy object in turn determines what host a query is sent to\nbased on the methods called, removing the need for parsing SQL queries.\n\n### Sticky connections\n\nSticky connections are supported by storing a pointer to the current PostgreSQL\nWAL position the moment a write is performed. This pointer is then stored in\nRedis for a short duration at the end of a request. Each user is given their own\nkey so that the actions of one user won't lead to all other users being\naffected. In the next request we get the pointer and compare this with all the\nsecondaries. If all secondaries have a WAL pointer that exceeds our pointer we\nknow they are in sync and we can safely use a secondary for our read-only\nqueries. If one or more secondaries are not yet in sync we will continue using\nthe primary until they are in sync. 
If no write is performed for 30 seconds and\nall the secondaries are still not in sync we'll revert to using the secondaries\nin order to prevent somebody from ending up running queries on the primary\nforever.\n\nChecking if a secondary has caught up is quite simple and is implemented in\n`Gitlab::Database::LoadBalancing::Host#caught_up?` as follows:\n\n```ruby\ndef caught_up?(location)\n  string = connection.quote(location)\n\n  query = \"SELECT NOT pg_is_in_recovery() OR \" \\\n    \"pg_xlog_location_diff(pg_last_xlog_replay_location(), #{string}) >= 0 AS result\"\n\n  row = connection.select_all(query).first\n\n  row && row['result'] == 't'\nensure\n  release_connection\nend\n```\n\nMost of the code here is standard Rails code to run raw queries and grab the\nresults. The most interesting part is the query itself, which is as follows:\n\n```sql\nSELECT NOT pg_is_in_recovery()\nOR pg_xlog_location_diff(pg_last_xlog_replay_location(), WAL-POINTER) >= 0 AS result\"\n```\n\nHere `WAL-POINTER` is the WAL pointer as returned by the PostgreSQL function\n`pg_current_xlog_insert_location()`, which is executed on the primary. In the\nabove code snippet the pointer is passed as an argument, which is then\nquoted/escaped and passed to the query.\n\nUsing the function `pg_last_xlog_replay_location()` we can get the WAL pointer\nof a secondary, which we can then compare to our primary pointer using\n`pg_xlog_location_diff()`. If the result is greater than 0 we know the secondary\nis in sync.\n\nThe check `NOT pg_is_in_recovery()` is added to ensure the query won't fail when\na secondary that we're checking was _just_ promoted to a primary and our\nGitLab process is not yet aware of this. In such a case we simply return `true`\nsince the primary is always in sync with itself.\n\n### Background processing\n\nOur background processing code _always_ uses the primary since most of the work\nperformed in the background consists of writes. 
Furthermore we can't reliably\nuse a hot-standby as we have no way of knowing whether a job should use the\nprimary or not as many jobs are not directly tied into a user.\n\n### Connection errors\n\nTo deal with connection errors our load balancer will not use a secondary if it\nis deemed to be offline, plus connection errors on any host (including the\nprimary) will result in the load balancer retrying the operation a few times.\nThis ensures that we don't immediately display an error page in the event of a\nhiccup or a database failover. While we also deal with [hot standby\nconflicts][hot-standby-conflicts] on the load balancer level we ended up\nenabling `hot_standby_feedback` on our secondaries as doing so solved all\nhot-standby conflicts without having any negative impact on table bloat.\n\nThe procedure we use is quite simple: for a secondary we'll retry a few times\nwith no delay in between. For a primary we'll retry the operation a few times\nusing an exponential backoff.\n\nFor more information you can refer to the source code in GitLab EE:\n\n* \u003Chttps://gitlab.com/gitlab-org/gitlab-ee/tree/master/ee/lib/gitlab/database/load_balancing.rb>\n* \u003Chttps://gitlab.com/gitlab-org/gitlab-ee/tree/master/ee/lib/gitlab/database/load_balancing>\n\nDatabase load balancing was first introduced in GitLab 9.0 and _only_ supports\nPostgreSQL. More information can be found in the [9.0 release post][9-0-release]\nand the [documentation](https://docs.gitlab.com/ee/administration/postgresql/database_load_balancing.html).\n\n## Crunchy Data\n\nIn parallel to working on implementing connection pooling and load balancing we\nwere working with [Crunchy Data][crunchy]. Until very recently I was the only\n[database specialist][database-specialist] which meant I had a lot of work on my\nplate. Furthermore my knowledge of PostgreSQL internals and its wide range of\nsettings is limited (or at least was at the time), meaning there's only so much\nI could do. 
Because of this we hired Crunchy to help us out with identifying\nproblems, investigating slow queries, proposing schema optimisations, optimising\nPostgreSQL settings, and much more.\n\nFor the duration of this cooperation most work was performed in confidential\nissues so we could share private data such as log files. With the cooperation\ncoming to an end we have removed sensitive information from some of these issues\nand opened them up to the public. The primary issue was\n[gitlab-com/infrastructure#1448][issue-1448], which in turn led to many separate\nissues being created and resolved.\n\nThe benefit of this cooperation was immense as it helped us identify and solve\nmany problems, something that would have taken me months to identify and solve\nif I had to do this all by myself.\n\nFortunately we recently managed to hire our [second database specialist][gstark]\nand we hope to grow the team more in the coming months.\n\n## Combining connection pooling and database load balancing\n\nCombining connection pooling and database load balancing allowed us to\ndrastically reduce the number of resources necessary to run our database cluster\nas well as spread load across our hot-standby servers. For example, instead of\nour primary having a near constant CPU utilisation of 70 percent today it\nusually hovers between 10 percent and 20 percent, while our two hot-standby\nservers hover around 20 percent most of the time:\n\n![CPU Percentage](https://about.gitlab.com/images/scaling-the-gitlab-database/cpu-percentage.png)\n\nHere `db3.cluster.gitlab.com` is our primary while the other two hosts are our\nsecondaries.\n\nOther load-related factors such as load averages, disk usage, and memory usage\nwere also drastically improved. 
For example, instead of the primary having a\nload average of around 20 it barely goes above an average of 10:\n\n![CPU Percentage](https://about.gitlab.com/images/scaling-the-gitlab-database/load-averages.png)\n\nDuring the busiest hours our secondaries serve around 12 000 transactions per\nsecond (roughly 740 000 per minute), while the primary serves around 6 000\ntransactions per second (roughly 340 000 per minute):\n\n![Transactions Per Second](https://about.gitlab.com/images/scaling-the-gitlab-database/transactions.png)\n\nUnfortunately we don't have any data on the transaction rates prior to deploying\npgbouncer and our database load balancer.\n\nAn up-to-date overview of our PostgreSQL statistics can be found at our [public\nGrafana dashboard][postgres-stats].\n\nSome of the settings we have set for pgbouncer are as follows:\n\n| Setting              | Value       |\n|----------------------|-------------|\n| default_pool_size    | 100         |\n| reserve_pool_size    | 5           |\n| reserve_pool_timeout | 3           |\n| max_client_conn      | 2048        |\n| pool_mode            | transaction |\n| server_idle_timeout  | 30          |\n\nWith that all said there is still some work left to be done such as:\nimplementing service discovery ([#2042][issue-2042]), improving how we check if\na secondary is available ([#2866][issue-2866]), and ignoring secondaries that\nare too far behind the primary ([#2197][issue-2197]).\n\nIt's worth mentioning that we currently do not have any plans of turning our\nload balancing solution into a standalone library that you can use outside of\nGitLab, instead our focus is on providing a solid load balancing solution for\nGitLab EE.\n\nIf this has gotten you interested and you enjoy working with databases,\nimproving application performance, and adding database-related features to\nGitLab (such as [service discovery][issue-2042]) you should definitely check out\nthe [job opening][job-opening] and the [database specialist 
handbook\nentry][database-specialist] for more information.\n\n[max-connections]: https://www.postgresql.org/docs/9.6/static/runtime-config-connection.html#GUC-MAX-CONNECTIONS\n[pgbouncer]: https://pgbouncer.github.io/\n[pgpool]: http://pgpool.net/mediawiki/index.php/Main_Page\n[hot-standby]: https://www.postgresql.org/docs/9.6/static/hot-standby.html\n[pgpool-comment-data]: https://gitlab.com/gitlab-com/infrastructure/issues/259#note_23464570\n[ha-docs]: https://docs.gitlab.com/ee/administration/postgresql/index.html\n[makara]: https://github.com/taskrabbit/makara\n[makara-thread-safe]: https://github.com/taskrabbit/makara/issues/151\n[lb-mr]: https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/1283\n[issue-2042]: https://gitlab.com/gitlab-org/gitlab-ee/issues/2042\n[issue-2866]: https://gitlab.com/gitlab-org/gitlab-ee/issues/2866\n[issue-2197]: https://gitlab.com/gitlab-org/gitlab-ee/issues/2197\n[9-0-release]: /releases/2017/03/22/gitlab-9-0-released/\n[lb-docs]: https://docs.gitlab.com/ee/administration/database_load_balancing.html\n[postgres-stats]: https://dashboards.gitlab.com/dashboard/db/postgresql-overview?refresh=5m&orgId=1\n[hot-standby-conflicts]: https://www.postgresql.org/docs/current/static/hot-standby.html#HOT-STANDBY-CONFLICT\n[citus]: https://www.citusdata.com/\n[octopus]: https://github.com/thiagopradi/octopus\n[crunchy]: https://www.crunchydata.com/\n[database-specialist]: /handbook/engineering/infrastructure/database/\n[job-opening]: /job-families/engineering/database-engineer/\n[issue-1448]: https://gitlab.com/gitlab-com/infrastructure/issues/1448\n[gstark]: https://gitlab.com/_stark\n",[9,2396],{"slug":5755,"featured":6,"template":680},"scaling-the-gitlab-database","content:en-us:blog:scaling-the-gitlab-database.yml","Scaling The Gitlab 
Database","en-us/blog/scaling-the-gitlab-database.yml","en-us/blog/scaling-the-gitlab-database",{"_path":5761,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5762,"content":5768,"config":5773,"_id":5775,"_type":14,"title":5776,"_source":16,"_file":5777,"_stem":5778,"_extension":19},"/en-us/blog/security-strengthened-by-interation-and-transparency",{"title":5763,"description":5764,"ogTitle":5763,"ogDescription":5764,"noIndex":6,"ogImage":5765,"ogUrl":5766,"ogSiteName":667,"ogType":668,"canonicalUrls":5766,"schema":5767},"Security strengthened by iteration, and transparency","Iteration is a core value at GitLab. How do you keep things protected when change is a constant?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670837/Blog/Hero%20Images/two-brown-trees.jpg","https://about.gitlab.com/blog/security-strengthened-by-interation-and-transparency","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Security strengthened by iteration, and transparency\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2020-05-18\",\n      }",{"title":5763,"description":5764,"authors":5769,"heroImage":5765,"date":5770,"body":5771,"category":698,"tags":5772},[1010],"2020-05-18","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\n***We sat down with senior application security engineer, Dominic Couture to talk about the challenges of working in AppSec, why the principle of least privilege works, and why our level of transparency makes our product more, not less, secure.***\n\n---\n\n![Dominic Couture Headshot](https://about.gitlab.com/images/blogimages/dcouture.png){: .small.right.wrap-text} **Name:** Dominic Couture\n\n**Title:** Senior security engineer, [Application Security](/topics/devsecops/)\n\n**How long have you been at GitLab?** I started in November 2019\n\n**GitLab handle:** 
[@dcouture](https://gitlab.com/dcouture)\n{: #tanuki-orange}\n\n**Connect with Dominic:** [LinkedIn](https://ca.linkedin.com/in/dominic-couture)/[Twitter](https://twitter.com/dee__see)\n\n\n\n#### Tell us what you do here at GitLab:\nI read a lot of GitLab code! I look for vulnerabilities or simply code improvements before it is shipped, as part of defense in depth. I also review issues when they’re in the planning stage for potential vulnerabilities, help maintain our [secure coding guidelines](https://docs.gitlab.com/ee/development/secure_coding_guidelines.html), write new tests and automation to support team workflows, and triage bugs that come through our bug bounty program.\n\n#### What’s the most challenging or rewarding aspect of your role?\nThe most challenging thing is trying to keep an eye on everything. There are tons of new features being worked on at all times and we know we can’t review every single one of them, so we prioritize and review what appears to be the most security critical. However, sometimes vulnerabilities will slip by in issues that didn’t seem to be security-sensitive at first. When this happens, we need to find ways to optimize our processes to ensure we catch potential issues  the next time we’re in a similar situation.\n\nThe most rewarding thing is when we do the above successfully! When we identify a common flaw in our code or process and we successfully put automation in place that eliminates it. It makes the product safer and the workload lighter so we can concentrate on new things.\n\n#### And, what are the top 2-3 initiatives you’re currently focused on?\nMany of the things we work on in the Application Security team are [not public](https://handbook.gitlab.com/handbook/values/#not-public) until they are finished so I can’t link to the detailed issues, but with that in mind…\n* I’m currently working on getting some automated testing in place to catch permission bugs in a specific part of our app. 
This will cover existing code and make it easy to test future code in that part of the application.\n* We’re also starting on a code review in another part of GitLab to find information leaks in APIs that might return more than the user asked for. We’re looking for issues similar to the leaks we’ve seen previously [through Elasticsearch results](https://gitlab.com/gitlab-org/gitlab/-/issues/29491).\n* I’m getting to know the teams and features in the [Verify](/stages-devops-lifecycle/verify/) and [Release](/stages-devops-lifecycle/release/) stages as I’m the [stable counterpart](/handbook/security/security-engineering/application-security/stable-counterparts.html) for them. I’m developing an expertise in those specific areas so I can have more context and provide more insightful comments when those teams ask for application security reviews.\n\n#### What is the most significant piece of security advice you could provide to a colleague or friend?\nI think everyone on our security team [who’s been asked this question](/blog/the-sky-is-not-falling/) has answered to use a password manager and I completely agree. A password manager and a unique password (and [MFA](https://en.wikipedia.org/wiki/Multi-factor_authentication)!) on every service you use is the difference between a relatively harmless leak on that small niche forum you participate in and a full identity theft due to a [credential stuffing](https://en.wikipedia.org/wiki/Credential_stuffing) attack that pivots to your bank account.\n\nFor a more technical piece of advice, I think the [principle of least privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege) is something to keep in mind at all times. When applied to APIs, the idea is to have the strictest permission requirements as a default. 
This ensures that if the permissions aren’t verified properly in the code, the result would be a bug which wouldn’t allow access to an asset by a user who should have access rather than a security bug that results in a data breach.\n\n#### How did you get into security? \nHackers have always fascinated me. As a child I had the desire to understand how what they were doing was possible and it is what got me interested in computers in the first place. I was in my early teens when I got my first computer and I quickly taught myself how to build websites. When talking to people about my programming projects I was warned about things like SQL injection and other types of security vulnerabilities. That piqued my curiosity and while researching those topics I discovered that [wargames](https://en.wikipedia.org/wiki/Wargame_(hacking)) existed. Since then, “hacking for fun” has always been a hobby for me. I’ve been a software developer for most of my career and while security has always been a part of that job, it was only when I joined GitLab that I became a security professional and transformed my hobby into a career.\n\n#### What do you look forward to most in security in the next 5 years?\nWhile automation will never solve all the problems, it can certainly solve some of them! I’m both curious and excited about security scanners moving to the next level with more insightful analysis and fewer false positives. 
AI and machine learning are the usual buzzwords we hear around this topic but I mainly look forward to [SAST](https://docs.gitlab.com/ee/user/application_security/sast/) tools having a better understanding of the code flow and being able to tell if my `os.Open(path)` call really involves user input and is indeed risky; instead of just flagging it for me to review *in case* it is.\n\n#### What mainstream or industry propagated security myth would you like to be better understood?\n[Virtual Private Networks](https://en.wikipedia.org/wiki/Virtual_private_network) (VPNs) are highly praised in online advertising lately and the claims around the safety they provide seem to be a bit exaggerated. In fact, [GitLab doesn’t even have a corporate VPN](/handbook/security/#why-we-dont-have-a-corporate-vpn)! I really enjoy [Tom Scott’s video](https://www.youtube.com/watch?v=WVDQEoe6ZWY) about the subject. In brief: VPNs nowadays provide little more security than the near-ubiquitous https protocol already does in many of the everyday use cases, and that includes using your laptop at the coffee shop. Don’t get me wrong, VPNs are very relevant and there are many valid reasons to use one, I just feel like the advertising around them isn’t completely truthful and people with no technical knowledge might be led to buy things they don’t need.\n\n#### GitLab is very unique in that we strive to be incredibly transparent...about everything.  What sort of challenges or opportunities does that present to you as a security professional?\nTransparency is a part of everything we do here at GitLab and most things are [public by default](https://handbook.gitlab.com/handbook/values/#public-by-default). This transparency-driven approach can lead to some occasional share of things that should not be public. Keeping an eye on those things to catch them before someone else does is challenging. 
Luckily for us, we run a public bug bounty program and have reporters that are very skilled at finding those things before the “bad people” do, should something slip through our fingers. While we’d rather keep those bounty payments to a minimum, it’s still a better outcome for GitLab than if someone had abused the leaked information.\n\nWith our open-source code base, the [blog articles](/blog/how-to-exploit-parser-differentials/) the security research team publishes about their findings, and our disclosure of the [bugs that come in through our bug bounty program](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=HackerOne) 30 days after being fixed, external researchers get an almost unparalleled level of insight and information about GitLab. This allows them to find and report much better vulnerabilities than if they were doing their testing in a black-box environment. The security risks associated with our level of transparency are usually the first thing to come to people’s mind, but in fact, our transparency makes our software more secure.\n\n> The security risks associated with our level of transparency are usually the first thing to come to people’s mind, but in fact, our transparency makes our software more secure.\n\n#### What sources make up your daily newsfeed to keep up to date in the industry?\nI try to use social media as little as possible, but I can’t deny that Twitter is the best place for security news. 
There are great blogs and websites to follow ([our GitLab Security blog](/blog/categories/security/), [PortSwigger’s research blog](https://portswigger.net/research) and [Google Project Zero](https://googleprojectzero.blogspot.com/) come to mind) but there are also tons of independent researchers that publish only once or twice a year and Twitter is the place to find out about all that good content.\n\n## Now, for the questions you *really* want to have answered:\n\n\n#### Favorite Linux distro?\nArch Linux! The installation process isn’t as hard as the memes pretend it is, the documentation is wonderful and you have a lot of power over what runs on your system. Arch uses systemd which has been a polarizing topic in recent years but if you don’t mind that it’s a great distro.\n\n#### What’s your favorite season?\nWinter. Luckily for me, I live in a place that’s covered in snow nearly 6 months a year so there’s a lot of winter to enjoy! There’s nothing like the freedom and fun of exploring the local forest and mountains on my nordic touring skis.\n\n#### When you’re not working, what do you enjoy doing?\nI run, bike, ski and hike a lot (always with my 2 australian shepherds by my side) and that serves as permanent training for the one or two ultramarathons I run each year. I love camping out in the forest with as little equipment as possible and basically just spending time in the forest. When inside, I like to hunt for security bugs on companies that run bug bounty programs (if it’s not on GitLab, it’s not work anymore, right?).\n\n#### Have a favorite quote?\n> The best time to plant a tree was 20 years ago. The second best time is now.\n\nThe internet says it’s a Chinese proverb though there’s nothing to back that up. We could probably all point to things we could/should have done differently in life but all that time spent thinking about it is time that isn’t spent actually doing it and benefiting from the change. 
It’s not too late!\n\nCover image by [Johannes Plenio](https://www.pexels.com/@jplenio) on [Pexels](https://www.pexels.com/photo/two-brown-trees-1632790/)\n{: .note}\n",[810,9,720,720],{"slug":5774,"featured":6,"template":680},"security-strengthened-by-interation-and-transparency","content:en-us:blog:security-strengthened-by-interation-and-transparency.yml","Security Strengthened By Interation And Transparency","en-us/blog/security-strengthened-by-interation-and-transparency.yml","en-us/blog/security-strengthened-by-interation-and-transparency",{"_path":5780,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5781,"content":5787,"config":5792,"_id":5794,"_type":14,"title":5795,"_source":16,"_file":5796,"_stem":5797,"_extension":19},"/en-us/blog/six-key-practices-that-improve-communication",{"title":5782,"description":5783,"ogTitle":5782,"ogDescription":5783,"noIndex":6,"ogImage":5784,"ogUrl":5785,"ogSiteName":667,"ogType":668,"canonicalUrls":5785,"schema":5786},"How to Improve Company Communication","Learn here how we've streamlined and improved company communication in six ways. And now your company can too.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680960/Blog/Hero%20Images/simon-abrams.jpg","https://about.gitlab.com/blog/six-key-practices-that-improve-communication","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to Improve Company Communication\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Brinkman\"}],\n        \"datePublished\": \"2019-12-23\",\n      }",{"title":5782,"description":5783,"authors":5788,"heroImage":5784,"date":5789,"body":5790,"category":808,"tags":5791},[4431],"2019-12-23","\n\nRecently, we caught up with a Fortune 50 company that wanted to understand how to enact change more quickly,\nbolster its product management practice, and execute projects more efficiently. 
This\nled to a conversation with our CEO and co-founder [Sid Sijbrandij](/company/team/#sytses), who\nwalked through a few key GitLab practices for improving communication in the workplace. Luckily, I was able to \"shadow\" this conversation as I was participating in our [CEO shadow program](/handbook/ceo/shadow/) at the time.\nAfter the discussion, we quickly realized it made sense to share our best practices. While these power our all-remote organization, we think they're good ideas for any company to consider.\n\n## Utilize directly responsible individuals (DRIs)\n\nIn our organization, we have the concept of the [directly responsible individual](/handbook/people-group/directly-responsible-individuals/).\nAnd, as you may have guessed, that person is **directly** responsible for the decision\nthey are tasked with. This could be something routine, such as a prioritization decision\n(in this case the typical DRI is the product manager) or something bigger, such as choosing a vendor to partner with to implement product analytics. The DRI is expected to become\ninformed about options and alternatives via their team, but is ultimately the one responsible\nfor making the call. This helps because you don’t have to wait for consensus-driven decision-making.\nMost organizations are slowed down by governance teams or by a need to ensure every single person\nimpacted signs off. While it’s important to communicate, it can slow you down if you wait for everyone\nto sign off. Consider implementing DRIs to help ensure high velocity decision-making.\n\n\n\n## Make product and engineering responsibilities distinct\n\nAt many organizations, the product manager is responsible for not only setting the priorities,\nbut also must ensure those priorities are shipped on time. This leads to\nan odd situation where product is held accountable for shipping code, something that is typically\noutside of the team's control. 
At GitLab, we clearly outline that product is responsible for prioritizing\nand defining what is to be done and engineering is responsible for shipping the defined functionality. Setting clear boundaries around what each functional area is responsible\nfor leads to an environment where people can get away from finger pointing and back to the job they should\nhave been doing all along.\n\n## Share via InnerSourcing\n\nAt GitLab, everything is [public by default](/handbook/hiring/principles/#transparency) and there should be a documented\nreason why an issue or line of code needs to be private. Why? The answer is simple:\nby making everything public by default, everyone in the community can contribute. Now, we\nrealize public repositories and issue trackers may not be feasible for every organization,\nbut this typically doesn’t apply _inside_ the organization. InnerSourcing is a mindset shift\nthat helps organizations share code and best practices internally.\nWhen code repositories and issue trackers become open, teams have a much easier time collaborating\non problems and solutions that may be siloed. [DevOps](/topics/devops/) is all about breaking down silos and InnerSourcing\nis a great way to not only reuse code and ideas, but also encourage collaboration.\n\n## Write everything down\n\nCan you recall a time where you went to a meeting, made a decision, and then came back next week to\nfind people forgot that happened? Unfortunately, this is too common at many organizations\nand leads to unnecessary rehashing of the same information, arguments, and talking points. By writing everything down,\nit’s clear when a decision was made or a process was changed. Writing things down is a high leverage activity –\nit allows information to be documented once and then disseminated to many people with little\neffort on the part of the author. It also helps to maintain a record of what happened. At GitLab, we write things down in issues and merge requests. 
And for bigger things, we have a\n[handbook](/handbook/) of over 3,000 pages where we outline how the company works,\nits various processes, and our product strategy. This single source of truth is also constantly being updated because we encourage everyone to propose changes and additions to it.\n\n## Iterate, iterate, iterate\n\n[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is one of GitLab’s core values for a number of\nreasons. When you iterate, you reduce the need for coordination amongst many teams and stakeholders. The smallest change or proposal\nyou make, the fewer people you need to ask for permission. If you are going to take six months to build something, you will need to spend a lot of time getting stakeholder and executive buy-in to ensure resources\nare being leveraged appropriately. Conversely, if you are going to take two weeks to build something, less buy-in is\nrequired and it is much easier to know if you’re on the right path. In larger organizations, coordination is the\nthing that slows you down. Iterating allows for quicker feedback.\n\n## Understand the job to be done\n\nThe [jobs to be done](https://hbr.org/2016/09/know-your-customers-jobs-to-be-done)\n(J2BD) framework is popular for shifting away from correlation-based models and towards what the customer is trying to\naccomplish. We heavily utilize our user experience (UX) group to work closely with our product management team in order\nto identify and highlight the top jobs to be done. We invest heavily in user research to confirm the jobs\nto be done. The jobs are turned into scorecards which outline areas of potential improvement. These potential improvements\nare provided to product managers to consider when prioritizing features. The jobs-to-be-done framework\nis important to identify cross-service workflows such as code deployment which crosses many DevOps stages\nwithin GitLab. 
When you fully understand your users, you’re able to prioritize the improvements that\nmatter, leading to a better product.\n\nWhile not an exhaustive list, the six characteristics identified above are key to GitLab’s success as an [all-remote company](/company/culture/all-remote/). And all of these practices can be taken and adapated for any organization looking to strengthen communication in the workplace or considering a move to all remote.\n\nMost everything we do is publicly available, from our code to our roadmaps to our product\nmanagement processes. If you’re interested, you can find out  more in our [product handbook](/handbook/product/),\nwhich outlines other axioms and best practices for software product development. And, as always, if what you’ve just read\nresonates with you, and you’d like to join the team, let [me](https://gitlab.com/ebrinkman) know. We’ve more than tripled\nour team in 2019, and we’ll likely be doubling again in 2020.\n\nCover image by [Simon Abrams](https://unsplash.com/@flysi3000) on [Unsplash](https://unsplash.com)\n{: .note}\n\n",[9,832,700],{"slug":5793,"featured":6,"template":680},"six-key-practices-that-improve-communication","content:en-us:blog:six-key-practices-that-improve-communication.yml","Six Key Practices That Improve Communication","en-us/blog/six-key-practices-that-improve-communication.yml","en-us/blog/six-key-practices-that-improve-communication",{"_path":5799,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5800,"content":5805,"config":5810,"_id":5812,"_type":14,"title":5813,"_source":16,"_file":5814,"_stem":5815,"_extension":19},"/en-us/blog/small-experiments-significant-results-and-learnings",{"title":5801,"description":5802,"ogTitle":5801,"ogDescription":5802,"noIndex":6,"ogImage":1452,"ogUrl":5803,"ogSiteName":667,"ogType":668,"canonicalUrls":5803,"schema":5804},"Small experiments, significant results and learnings","How our Growth team validates design solutions with the smallest experiments 
possible","https://about.gitlab.com/blog/small-experiments-significant-results-and-learnings","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Small experiments, significant results and learnings\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2021-04-07\",\n      }",{"title":5801,"description":5802,"authors":5806,"heroImage":1452,"date":5807,"body":5808,"category":698,"tags":5809},[1897],"2021-04-07","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nA while ago, I worked closely with the Growth:Expansion team on improving the experience of inviting users to GitLab. I first went through the existing experience of a user that’s inviting their team to GitLab and found a few opportunities for improvements. The work could have ended there, but I felt uneasy about it in the days after completing it. I felt that there was more to it, so I dug in again. This time, I wanted to explore what the experience was across many users involved in the process, instead of just the inviting user.\n\n## Multi-user journey map\n\nSo instead of mapping out a single-user journey map, I mapped out the journey as it was then for all the users involved. I came up with the following:\n\n![Multi-user journey map](https://about.gitlab.com/images/blogimages/small-experiments/multi-user-journey.png)\n\nTake a look at the [multi-user journey map on Mural](https://app.mural.co/t/gitlab2474/m/gitlab2474/1588920686905/a2982098783c967cee6f7e656fffe574dec0777b).\n\nI wanted to see what it was like for non-admin users to invite their team to GitLab or assign some work to them (not all managers and leads are admin users). So a non-admin user wants to assign an issue to a team member that isn’t on GitLab yet. 
There are three users involved: the non-admin user trying to assign some work, the admin user who is the only one who can invite new users and the user that is being invited.\nThe main conclusion of this multi-user journey map? There are many interruptions and a lot of waiting time between the steps. Such a simple task as assigning an issue to a team member can span across days because of these interruptions.\n\nThe other conclusion of this work was that it was hard to find out how to invite users to GitLab, especially for new teams trying to adopt GitLab.\n\nSo we came up with a problem to solve:\n\n> Can we make it easier for new teams to invite their team members?\n\nand a question to answer:\n\n> Would non-admin users request invitations to their team members if they could?\n\n## Making it easier for new teams to invite their team members\nWe started with the smaller problem as it was a great candidate to do a MVC (minimal viable change) experiment and learn a lot from it. The concept of the solution was simple: increase the discoverability of the *invite members* feature. After some thought, I realized that the best place for this was the Assignee dropdown that we use on issues and merge requests. It’s at the top of the right sidebar, which means it’s quite prominent, but more importantly, it is a commonly used feature related to team management.\n\nSo we decided that the most minimal experiment we could do was to add the “Invite members” link to the bottom of that dropdown and link directly to the *Settings* → *Members* page of the project. That’s the page where admin users can invite new users.\n\n![Assignee dropdown](https://about.gitlab.com/images/blogimages/small-experiments/assignee-dropdown.png){: .small.center}\n\n[Here’s the prototype](https://www.sketch.com/s/e96544d1-f2c7-45d5-a968-23e63064432d/a/bD9885/play) of the experience we tested. 
After crunching the numbers of the experiment we saw the following results:\n\n- Only a 0.16% click-through rate on the “Invite members” link in the dropdown\n- But a 2% increase in namespaces with two or more users\nThis was significant because we only showed the new “Invite members” link to admin users. So the low click-through rate makes sense as the majority of users viewing the assignee are not admins and therefore did not see our test \"invite members\" option. However, even with a low click-through rate, the change in the metric that mattered most saw a 2% increase. Which is a considerable increase on its own! But we’re just getting started.\n\n## Do non-admin users want to invite their team members?\n\nNow we come to the question that we uncovered during the mapping of the multi-user journey. There’s a lot of waiting time and disruptions in the process of inviting a new user to GitLab. Especially when a non-admin user wants to do it. So we decided to run a similar experiment where we show the “Invite members” link in the assignee dropdown to non-admin users too.\nFollowing our MVC approach to conducting experiments, we wanted to run a minimal experiment that would help us answer this question. Instead of taking the time to build a complete experience for non-admin users requesting invitations from admin users, we decided to show a modal explaining that this feature isn’t available yet. We also added a link that would take the non-admin user to the *Settings* → *Members* page, where they can see who the admin is and contact them outside of GitLab (for now).\n\n![Modal not ready](https://about.gitlab.com/images/blogimages/small-experiments/modal-not-ready.png)\nIt’s not the ideal experience, but the potential for learning justified it. Plus, we only show an experiment like this to a fraction of our users. The experiment has only been running for a few weeks, so it’s too early for conclusions. 
But we’re seeing encouraging results already, some suggest even up to a 20% increase in namespaces with two or more users so yes, it seems that non-admin users do want to invite their team members.\n\n## Other improvements and follow-up experiments\nOne major improvement that our engineers have been working on is the “Invite members” modal. Instead of taking the user out of their workflow and into the *Settings* → *Members* page, they’ll be able to invite team members within their current workflow.\n\n![Modal invite form](https://about.gitlab.com/images/blogimages/small-experiments/modal-invite.jpg)\n\n[This is a prototype](https://www.sketch.com/s/e96544d1-f2c7-45d5-a968-23e63064432d/a/wmOa8m/play) of what the experience would be like with the invite modal.\n\n## Conclusion\n\nThese experiments are the first among many that we want to conduct. We’re also thinking about allowing non-admin users to request a free trial, activation of a feature, switching to a higher plan from their admin all while potentially giving the admin the ability to turn this functionality off and on as needed. The experiments we conducted so far are indicating that there’s a demand for non-admin users to be able to request things limited to admins. 
And most importantly, they were minimal experiments that led to significant results and great learnings.\n\nFor more details about these experiments check\n* [the original experiment design issue](https://gitlab.com/gitlab-org/gitlab/-/issues/217921)\n* [the follow-up experiment design issue](https://gitlab.com/gitlab-org/gitlab/-/issues/235979)\n* [the video recording of experiment and results discussion between me and Sam Awezec](https://www.youtube.com/watch?v=J5h_SNH3Nt8&ab_channel=GitLabUnfiltered) (the Product Manager of Growth:Expansion)\n\nPhoto by [Evgeni Tcherkasski](https://unsplash.com/@evgenit?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/small?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1152,9,700],{"slug":5811,"featured":6,"template":680},"small-experiments-significant-results-and-learnings","content:en-us:blog:small-experiments-significant-results-and-learnings.yml","Small Experiments Significant Results And Learnings","en-us/blog/small-experiments-significant-results-and-learnings.yml","en-us/blog/small-experiments-significant-results-and-learnings",{"_path":5817,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5818,"content":5824,"config":5829,"_id":5831,"_type":14,"title":5832,"_source":16,"_file":5833,"_stem":5834,"_extension":19},"/en-us/blog/software-test-at-gitlab",{"title":5819,"description":5820,"ogTitle":5819,"ogDescription":5820,"noIndex":6,"ogImage":5821,"ogUrl":5822,"ogSiteName":667,"ogType":668,"canonicalUrls":5822,"schema":5823},"An inside look at software testing at GitLab","Director of quality engineering Mek Stittri talks test technology and the future of automation at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680800/Blog/Hero%20Images/softwaretestlaunch.jpg","https://about.gitlab.com/blog/software-test-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n      
  \"@type\": \"Article\",\n        \"headline\": \"An inside look at software testing at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-08-30\",\n      }",{"title":5819,"description":5820,"authors":5825,"heroImage":5821,"date":5826,"body":5827,"category":787,"tags":5828},[869],"2019-08-30","\n\n_In our [just-released survey of over 4,000 developers, security\nprofessionals, and operations team members](/developer-survey/), there was one thing everyone agreed on: 50% of each group\nsaid software testing is the biggest reason why development is delayed. Testers have long\nbeen the underdogs in the SDLC and that viewpoint is apparently very slow to change.\nTo understand what’s really going on, and how things work at GitLab, we\nasked [Mek Stittri](/company/team/#mekdev), director of quality engineering, to share his\nperspective on what’s working with test today and what’s in need of improvement._\n\n## Why is test a continued DevOps problem?\n\nIt’s a two-part answer, Mek says. First, there are simply not enough tests run and second, the tests that are used are often flaky (meaning their results aren’t necessarily trustworthy).\n\nTackling the issue of not running enough tests, Mek says it’s an area GitLab is addressing. “At GitLab, I think we are better than other companies where developers write unit tests and integration tests every time a change goes in,” he says. “That is great, but that testing is at a lower level, and it doesn't really map to a business use case.” To write better tests a team needs test requirements, but there can be so many different sets of stakeholders that it can be tough to get their input about *test* requirements and not just feature requirements. “We are improving it here at GitLab where our VP of Product [Scott Williamson](https://gitlab.com/sfwgitlab) is doing a great job. 
We have a section for test requirements right now (in the issue and merge request templates). It's now a blank and free form for people to fill in, but it should be highlighted going forward as a required section taking input from product discovery and validation as a deliverable.”\n\nThe bottom line: the stakeholders who are delivering the code need to understand the end goal better. “Unit tests test code at a smaller scale, and that’s great, but it doesn’t really verify the functionality works end to end as a whole. We need more coverage and more understanding of what needs to be tested.”\n\n![The Apollo 11 launch framework](https://about.gitlab.com/images/blogimages/apollo11framework.png){: .shadow.small.center}\nApollo 11 is held up by a framework and software is no different.\n{: .note.text-center}\n\nMek likens this process to Apollo 11. Everyone is excited about the rocket (the software features, in other words) but no one pays attention to the red scaffolding on the right that’s actually holding the rocket up. “That’s the side that nobody looks at but it’s a lot of work,” he says. “It’s taller than the rocket. We need to build that platform to have adequate testing (functional, performance, etc).” The ideal situation to get a company there? Start building the test framework and add test coverage at the exact same time the product is being built. “You assemble it together, run it, it’s passing and we go for launch and it’s shipped. We’re not there yet. And I can assure you a lot of companies out there aren’t there yet either.”\n\n## About those flaky tests…\n\n“There are a lot of test automation engineers and test developers out there, but not all of them know how to write and design a good test,” Mek explains. Automated tests needs to function like a flow of self-retrying dominoes where if one step is not completed it needs to keep retrying to reach the next step. Tests need to mimic what a manual tester would do, he says. 
No manual tester is going to click on a button and then wait 10 minutes. The tester will click again, or try other strategies. “At GitLab [we put emphasis on test framework reliability](/handbook/engineering/quality/#test-framework-reliability-and-efficiency) and we treat each user workflow step like a piece of retrying dominoes. We need to make sure all the dominoes fall over so the workflow is completed,” Mek says.\n\n>We need more coverage and more understanding of what needs to be tested.\n\nSo companies need to think through how the tests work, but also test the right things. If that happens, quality can be everyone’s responsibility in the end, Mek says. “We want developers to contribute to the end-to-end test so you want to make a test framework that is easy to use and easy to read. I think this all factors in.” And Mek points out it really is in everyone’s best interests to think about quality first. “Let's make the process better so we work smarter, right? We achieve more without having to work weekends or get pinged during your family dinner. Nobody wants that.”\n\n## Test automation and machine learning\n\nTest automation is a cornerstone of successful [DevOps](/topics/devops/) but it remains difficult for many companies to achieve. Mek’s take: “We need to design the product such that the test automation framework can integrate into it well,” he says flatly. That requires good collaboration with development teams due to frontend UI locators and backend APIs that are the interfaces to enable better and stable test automation. “Go back to Apollo 11,” Mek says. “It's like the connections along the rocket's fuselage. I need to integrate with this to make sure things are working fine. The probes and sensors need to be there. So if those aren't there, then your test automation engineers need to code around these obstacles. 
It's not working smart.” In other words, the test automation framework should not take the longer route when executing user interactions to the application because this can be the source of unstable and in-efficient tests.\n\nOne step that can help companies – including GitLab – get there is [machine learning](https://medium.com/machine-learning-for-humans/why-machine-learning-matters-6164faf1df12). “We are having discussions here at GitLab about where we want a bot,” Mek says. “I think machine learning will come and help, but the input and output needs to be clearly defined so you have a clear implementation direction, TensorFlow, Linear Regression, or whatever techniques. You can write a bot that just lives in the product, meaning it looks at all the UI locators (dedicated to test automation) on a page and randomly clicks one of those links.” This GitLab bot of the future will work 24/7, clicking, clicking, clicking on the page until it errors out or runs into a 404, Mek says. The goal is to create a bot that is like a “menacing QA engineer” that can be programmed to keep banging on the problematic areas until everything is solved. To get there will require lots of data – machine learning literally needs to learn from data and experience – and although there are a handful of companies experimenting with this now, this is all still very early stage.\n\n## Where we’re headed with testing\n\nMek and his team hope to increase both quality and productivity this year which may be a bit of a balancing act, since more “quality” equals more testing which can result in a longer development cycle and perhaps reduced productivity (this is why we say test automation engineers are often unappreciated!). “My department is working this quarter to have a full suite of automated tests for our enterprise features. We want to have a big checkbox for the enterprise features every time we deploy. 
We need this because it is mapping to the business use case.” But Mek and team need to do all of that while shortening the test runtime for developers. “You want more test coverage but we need to keep the runtime low because we can’t have developers and release managers wait two hours.”\n\nThe plan is to add more runners, optimize them, de-duplicate some tests and make sure the process is as streamlined as it can be. “Right now it takes about an hour or so, but I would love to have it down to 30 minutes where we certify that this merge request going in checks all the boxes and all the enterprise features are not broken. We need to set ourselves an aggressive goal and I would say 30 minutes is a good first step.”\n\nCover image by [Kurt Cotoaga](https://unsplash.com/@kydroon) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,722,1440,723],{"slug":5830,"featured":6,"template":680},"software-test-at-gitlab","content:en-us:blog:software-test-at-gitlab.yml","Software Test At Gitlab","en-us/blog/software-test-at-gitlab.yml","en-us/blog/software-test-at-gitlab",{"_path":5836,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5837,"content":5843,"config":5848,"_id":5850,"_type":14,"title":5851,"_source":16,"_file":5852,"_stem":5853,"_extension":19},"/en-us/blog/solving-gitlabs-changelog-conflict-crisis",{"title":5838,"description":5839,"ogTitle":5838,"ogDescription":5839,"noIndex":6,"ogImage":5840,"ogUrl":5841,"ogSiteName":667,"ogType":668,"canonicalUrls":5841,"schema":5842},"How we solved GitLab's CHANGELOG conflict crisis","How we eliminated changelog-related merge conflicts and automated a crucial part of our release process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672139/Blog/Hero%20Images/solving-gitlab-changelog-crisis.jpg","https://about.gitlab.com/blog/solving-gitlabs-changelog-conflict-crisis","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we 
solved GitLab's CHANGELOG conflict crisis\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Speicher\"}],\n        \"datePublished\": \"2018-07-03\",\n      }",{"title":5838,"description":5839,"authors":5844,"heroImage":5840,"date":5845,"body":5846,"category":743,"tags":5847},[3987],"2018-07-03","\n\nSince its [very first commit] more than six years ago, GitLab has had a changelog\ndetailing the noteworthy changes in each release. Shortly after [Enterprise\nEdition (EE) was introduced], it [got a changelog of its own]. Whenever anyone\n– whether it was a community contributor or a GitLab employee – contributed a\nnew feature or fix to the project, a changelog entry would be added to let users\nknow what improved.\n\nAs GitLab gained in popularity and started receiving more contributions, we'd\nconstantly see merge conflicts in the changelog when multiple merge requests\nattempted to add an entry to the list. This quickly became a major source of\ndelays in development, as contributors would have to rebase their branch in order\nto resolve the conflicts.\n\nThis post outlines how we completely eliminated changelog-related merge\nconflicts, removed bottlenecks for contributions, and automated a crucial part\nof our release process.\n\nAt the beginning, GitLab's `CHANGELOG` file would look something like this:\n\n```text\nv 8.0.0 (unreleased)\n  - Prevent anchors from being hidden by header (Stan Hu)\n  - Remove satellites\n  - Better performance for web editor (switched from satellites to rugged)\n  - Faster merge\n  - ...\n  - Ability to fetch merge requests from refs/merge-requests/:id\n\nv 7.14.1\n  - Improve abuse reports management from admin area\n  - Ability to enable SSL verification for Webhooks\n\nv 7.14.0\n  - Fix bug where non-project members of the target project could set labels on new merge requests.\n  - Upgrade gitlab_git to 7.2.14 to ignore CRLFs in .gitmodules (Stan Hu)\n  - ...\n  - Fix broken code import and display error messages 
if something went wrong with creating project (Stan Hu)\n```\n\nWhen a developer made a change in the upcoming release, `8.0.0` in this example,\nthey would add a changelog entry at the bottom:\n\n```diff\ndiff --git a/CHANGELOG b/CHANGELOG\nindex de2066f..0fc2c18 100644\n--- a/CHANGELOG\n+++ b/CHANGELOG\n@@ -5,6 +5,7 @@ v 8.0.0 (unreleased)\n   - Faster merge\n   - ...\n   - Ability to fetch merge requests from refs/merge-requests/:id\n+  - Made literally everything better. Evvvvverything!\n\n v 7.14.1\n   - Improve abuse reports management from admin area\n```\n\nAt the same time, another developer might have made a similar change in _their_\nbranch:\n\n```diff\ndiff --git a/CHANGELOG b/CHANGELOG\nindex de2066f..5f81cfd 100644\n--- a/CHANGELOG\n+++ b/CHANGELOG\n@@ -5,6 +5,7 @@ v 8.0.0 (unreleased)\n   - Faster merge\n   - ...\n   - Ability to fetch merge requests from refs/merge-requests/:id\n+  - Made a few things worse. Woops!\n\n v 7.14.1\n   - Improve abuse reports management from admin area\n```\n\nNow when one branch was merged, it'd create a conflict in the other:\n\n```diff\ndiff --cc CHANGELOG\nindex 5f81cfd,0fc2c18..0000000\n--- a/CHANGELOG\n+++ b/CHANGELOG\n@@@ -5,7 -5,7 +5,11 @@@ v 8.0.0 (unreleased\n    - Faster merge\n    - ...\n    - Ability to fetch merge requests from refs/merge-requests/:id\n++\u003C\u003C\u003C\u003C\u003C\u003C\u003C HEAD\n +  - Made a few things worse. Woops!\n++=======\n+   - Made literally everything better. Evvvvverything!\n++>>>>>>> developer-1\n\n  v 7.14.1\n    - Improve abuse reports management from admin area\n```\n\nThis resulted in a ton of wasted time as something would get merged, and then\nevery other open branch adding a changelog entry would need to be rebased. The\nsituation only got worse as the number of contributors to GitLab grew over time.\n\nOur initial, [boring solution] to the problem was to begin adding empty\nplaceholder entries at the beginning of each monthly release cycle. 
The\nchangelog for the upcoming unreleased version might look like this:\n\n```\nv8.1.0 (unreleased)\n  -\n  -\n  -\n  -\n  -\n  -\n  -\n  - (and so on)\n```\n\nA developer would make their change and then choose a random spot in the list to\nadd a changelog entry. This worked for a while, until the placeholders began to\nbe filled out as we got closer to the release date. Eventually two (or more)\nmerge requests would attempt to add different entries at the same placeholder,\nand one being merged created a conflict in the others.\n\nThe problem was lessened, but not solved.\n\nNot only was this a huge waste of time for developers, it created an additional\nheadache for [release managers] when they cherry-picked a commit into a stable\nbranch for a patch release. If the commit included a changelog entry, which any\nchange intended for a patch release _should_ have, cherry-picking that commit\nwould bring in the contents of the changelog at the point of that commit, often\nincluding dozens of unrelated changes. The release manager would have to\nmanually remove the unrelated entries, often doing this multiple times per\nrelease. This was compounded when we had to release multiple patch versions at\nonce due to a security issue.\n\n[very first commit]: https://gitlab.com/gitlab-org/gitlab-ce/commit/9ba1224867665844b117fa037e1465bb706b3685\n[Enterprise Edition (EE) was introduced]: /releases/2013/07/22/announcing-gitlab-enterprise-edition/\n[got a changelog of its own]: https://gitlab.com/gitlab-org/gitlab-ee/commit/e316324be5f71f02a01ae007ab1cf5cbe410c2e1\n[boring solution]: https://handbook.gitlab.com/handbook/values/#efficiency\n[release managers]: https://gitlab.com/gitlab-org/release/docs/blob/master/quickstart/release-manager.md\n\n## Brainstorming solutions\n\nFrustrations with the process finally reached a tipping point, and [an issue was\ncreated] to discuss a solution. 
[Yorick] had the [original idea] that would\nultimately form the foundation of our solution. During a [trip around the\nworld], myself, [Douwe], and [Marin] were in Brooklyn, NY, and during a walk\naround the city one beautiful summer evening we ended up [with a proposal] to\nfinally solve the problem.\n\nEach changelog entry would be its own YAML file in a `CHANGELOG/unreleased`\nfolder. When a release manager went to cherry-pick a merge into a stable branch\nin preparation for a release, they'd use a custom script that would perform the\ncherry-pick and then move any changelog entry added by that action to a\nversion-specific subfolder, such as `CHANGELOG/8.9.4`. At the time of release,\nany entries in the version's subfolder would be compiled into a single Markdown\nchangelog file, and then deleted.\n\nWith an idea of where we wanted to end up but no idea how to get there, I\nstarted with a [spike].\n\n[an issue was created]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826\n[original idea]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_12623521\n[Yorick]: /company/team/#yorickpeterse\n[Douwe]: /company/team/#DouweM\n[Marin]: /company/team/#maxlazio\n[trip around the world]: /2016/08/24/gitlab-in-action/\n[spike]: https://gitlab.com/snippets/1713271\n\n## A turning point\n\nAfter a few days of working on the spike, I [had a realization] that we didn't\nneed the cherry-picking concept at all:\n\n> Cherry picking a merge commit into a stable branch will add that merge's\n> `CHANGELOG/unreleased/whatever-its-called.yml` file to the stable branch. Upon\n> tagging a release with release-tools, we can consider _everything_ in that\n> stable branch's \"unreleased\" folder as part of the tagged release. We collect\n> those files, compile them to Markdown, remove them from the stable branch\n> _and_ `master`, and that's our changelog for the release.\n\nThis was a major \"aha\" moment, as it greatly simplified the\nworkflow for release managers. 
They could continue their existing workflow, and\nthe release flow would transparently handle the rest. It also meant we could\nhandle everything in our [release-tools] project, which is responsible\nfor tagging a release and kicking off our packaging.\n\nEven though we ended up not using a lot of the work that went into it, my\noriginal spike was still valuable. It allowed us to see pain points early on,\nrefine the process, and find a better solution. It also gave me additional\nexperience interacting with Git repositories programmatically via [Rugged], and\nthat would go on to be especially useful as we implemented the final tooling.\n\n[with a proposal]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_12998363\n[had a realization]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_13527876\n[release-tools]: https://gitlab.com/gitlab-org/release-tools/\n[Rugged]: https://github.com/libgit2/rugged\n\n## Building the building blocks\n\nWe knew there were several components that we'd need to build:\n\n1. Something to read and represent the individual YAML data files\n1. Something to compile individual entries into a Markdown list\n1. Something to insert the compiled Markdown into the _correct spot_ in an\n   existing list of releases\n1. Something to remove the files that had been compiled, and then commit the\n   updated `CHANGELOG.md` file to the repository\n\nAll of these components were created in a [single merge request] and refined\nthrough several code review cycles. The commits listed there are all fairly\natomic and may be interesting to read through on their own. 
The code review that\nhappened in the merge request was incredibly valuable, and allowed us to really\nsimplify some code that was hard to wrap one's head around, even for me as the\noriginal author!\n\n## Automated testing\n\nOf course, we wouldn't consider this solution complete until we had automated\ntests guaranteeing the behavior and consistency of the automated compilation,\nincluding reading from and writing to multiple branches across multiple\nrepositories.\n\nI ended up using Rugged to create [fixture repositories] that would create a\nrepeatable testing environment, which we could then verify with [custom RSpec\nmatchers].\n\n[single merge request]: https://gitlab.com/gitlab-org/release-tools/merge_requests/29\n[fixture repositories]: https://gitlab.com/gitlab-org/release-tools/blob/6531d8d7b7acbdf6ab577db4381036bbc18e3bbc/spec/support/changelog_fixture.rb\n[custom RSpec matchers]: https://gitlab.com/gitlab-org/release-tools/blob/6531d8d7b7acbdf6ab577db4381036bbc18e3bbc/spec/support/matchers/rugged_matchers.rb\n\n## Hooking into the release process\n\nAt this point we were fairly confident the changelog compilation worked, so it\nwas time to [hook it into our existing release process].\n\nWhile testing this integration on a real release, we uncovered a pretty\nhilarious (but dangerous) oversight. I'll let the commit that fixed it speak for\nitself:\n\n> [Protect against deleting everything when there are no changelog entries](https://gitlab.com/gitlab-org/release-tools/merge_requests/47/diffs?commit_id=5b3fe48a7697bda856b6bed1fedc4c210439849b)\n>\n> On a stable branch with no changelog entry files, the resulting empty\n> array was passed to `Rugged::Index#remove_all` which, when given an\n> empty array, removes **everything**. 
This was not ideal.\n\n[hook it into our existing release process]: https://gitlab.com/gitlab-org/release-tools/merge_requests/47\n\n## Developer tooling\n\nThe final pieces of the puzzle were creating a tool to help developers create\nvalid changelog entries easily, and adding documentation. Both were handled in\n[this merge request](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/7098).\n\nThis tool allows developers to run `bin/changelog`, passing it the title of\ntheir change, to generate a valid changelog entry file. Additional options are\n[in the documentation](https://docs.gitlab.com/ee/development/changelog.html).\n\n## Future plans\n\nThis changelog process has worked beautifully for us since it was introduced,\nand we know it might be just as useful to other projects. We're [investigating a\nway to make it more generic] so that it can remove a tedious chore for more\ndevelopers.\n\nI worked on this project as part of our Edge team, now known as the [Quality\nteam]. If you're interested in this kind of internal tooling or other\nautomation, we're hiring! 
Check out our [open positions](/jobs/).\n\n[investigating a way to make it more generic]: https://gitlab.com/gitlab-org/release-tools/issues/209\n[Quality team]: https://about.gitlab.com/handbook/engineering/quality/\n\nPhoto by [Patrick Tomasso](https://unsplash.com/photos/1S-PanVaJmU?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/abstract?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,723],{"slug":5849,"featured":6,"template":680},"solving-gitlabs-changelog-conflict-crisis","content:en-us:blog:solving-gitlabs-changelog-conflict-crisis.yml","Solving Gitlabs Changelog Conflict Crisis","en-us/blog/solving-gitlabs-changelog-conflict-crisis.yml","en-us/blog/solving-gitlabs-changelog-conflict-crisis",{"_path":5855,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5856,"content":5862,"config":5867,"_id":5869,"_type":14,"title":5870,"_source":16,"_file":5871,"_stem":5872,"_extension":19},"/en-us/blog/start-contributing-to-gitlab-today",{"title":5857,"description":5858,"ogTitle":5857,"ogDescription":5858,"noIndex":6,"ogImage":5859,"ogUrl":5860,"ogSiteName":667,"ogType":668,"canonicalUrls":5860,"schema":5861},"Start contributing to GitLab today","Learn how to start contributing to GitLab and how GitLab team members are here to help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676361/Blog/Hero%20Images/collaboration.jpg","https://about.gitlab.com/blog/start-contributing-to-gitlab-today","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Start contributing to GitLab today\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rémy Coutable\"}],\n        \"datePublished\": \"2020-09-30\",\n      }",{"title":5857,"description":5858,"authors":5863,"heroImage":5859,"date":3286,"body":5865,"category":698,"tags":5866},[5864],"Rémy Coutable","\n{::options 
parse_block_html=\"true\" /}\n\nAt GitLab, [everyone can contribute](https://about.gitlab.com/company/mission/#mission). This has been our mission from day\none, since GitLab started as --and is still-- an open-source project.\n\nWe believe that, when consumers become contributors, it benefits everyone: GitLab the product, GitLab the company, GitLab the community\nas well as all GitLab users all around the world.\n\nWe already merged more than 7,700 [“community contribution”](https://gitlab.com/groups/gitlab-org/-/merge_requests?label_name%5B%5D=Community+contribution&state=merged) merge requests from our wider community (at the `gitlab-org` group level).\n\n![Screenshot showing more than 7,700 merged community MRs](https://about.gitlab.com/images/blogimages/2020-09-30-community-contributions.png){: .shadow.medium.center}\n*\u003Csmall>Merge requests from community members not employed by GitLab (aka from the GitLab wider community)\u003C/small>*\n\n## Contributing tracks\n\nNow, it's your turn to contribute and improve GitLab! 
Since not everyone share the same interests nor competencies, we\nhave multiple tracks to ensure everyone can contribute:\n\n- [Development (new features, bug fixes, performance improvements)](/community/contribute/development/)\n- [Documentation addition, improvements, and fixes](/community/contribute/documentation/)\n- [Translations](/community/contribute/translation/)\n- [UX design](https://about.gitlab.com/community/contribute/ux-design/)\n- [Project templates](/community/contribute/project-templates/)\n\nWhen you're ready, simply choose the track for you and follow the instructions.\n\n## Start small...\n\nTo get familiar with the merge request workflow, I advise you start small.\n[Fixing a typo](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/42447) or\n[adding a comma](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/43021) in the documentation are small yet awesome\ncontributions that are usually merged in a matter of hours. These are awesome to gear up and get the ball rolling.\n\nFor more examples, be sure to take a look at the [community merge requests that touched GitLab documentation](https://gitlab.com/gitlab-org/gitlab/-/merge_requests?state=merged&label_name[]=documentation&label_name[]=Community%20contribution).\n\nThese kind of changes don't require a lot of time from you, but if you have more time and are ready to tackle bigger challenges,\nyou can start looking for [bugs](https://gitlab.com/gitlab-org/gitlab/-/issues?label_name%5B%5D=Accepting+merge+requests&label_name[]=type::bug&scope=all&sort=popularity&state=opened)\nor [feature proposals](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=Accepting%20merge%20requests&label_name[]=feature).\n\n## ...and end up MVP\n\nEvery contribution is a collaborative effort between the merge request author, the reviewer(s), potentially MR coaches, and the maintainer (who gets to merge the MR).\n\nSome contributions are so complex and technical that 
they take months of collaboration to get accross the finish line!\n\nLet's give you a few examples of great collaborative efforts that happened in the last 12 months:\n\n1. [Cédric Tabin](https://gitlab.com/ctabin) worked for more than 9 months contributing\n   [a new CI job keyword allowing interruptible builds](/releases/2019/09/22/gitlab-12-3-released/#interruptible-keyword-to-indicate-if-a-job-can-be-safely-canceled)\n   and working with the GitLab teams to get it across the line. The [merge request](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/23464) involved 51 people, who posted 405 discussion notes!\n   This contribution was released in GitLab 12.3, and allows to save a lot of money by avoiding running redundant pipelines.\n1. [Tuomo Ala-Vannesluoma](https://gitlab.com/tuomoa) worked for 7 months adding support for\n   [previewing artifacts that are not public](/releases/2019/10/22/gitlab-12-4-released/#private-project-support-for-online-view-of-html-artifacts), a highly requested feature with almost 300 upvotes!\n   The [merge request](https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/134) landed in GitLab 12.4, and received two 🍾 emoji votes.\n1. [Roger Meier](https://gitlab.com/bufferoverflow) worked for more than 4 months contributing\n   [support for S/MIME Signature Verification of Commits](/releases/2020/02/22/gitlab-12-8-released/#smime-signature-verification-of-commits), an important feature for sensitive projects and in regulated industries.\n   Roger's teammate, [Henning Schild](https://gitlab.com/henning-schild), contributed the change upstream to Git and Roger made the change in GitLab.\n   The [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/17773) involved 42 people, who posted 430 discussion notes, and landed in GitLab 12.8.\n1. 
[Steve Exley](https://gitlab.com/steve.exley) worked for more than 5 months contributing one of\n   [the biggest architectural changes to the Docker executor](/releases/2020/03/22/gitlab-12-9-released/#gitlab-runner-129).\n   that solved multiple issues for the Docker executor, including [jobs sharing the same network bridge](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4430),\n   [services don't work when `network_mode` is specified](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2699),\n   and lastly, services can connect to one another and connect with the build container as well!\n   The [merge request](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1569) involved 69 people, who posted 293 discussion notes. It landed in GitLab 12.9, and received five 🔥 emoji votes.\n1. [Jesse Hall](https://gitlab.com/jessehall3) worked for more than 5 months contributing one of\n   [the Batch Suggestions feature](/releases/2020/07/22/gitlab-13-2-released/#batch-suggestions) which allows MR reviewers to group all suggestions made to a diff and submit them at once.\n   Because each suggestion translates into a Git operation, submitting these individually could take a long time if there were a large number of suggestions. Submitting suggestions in batches has numerous advantages, including time savings, efficient CI resource utilization (only one pipeline for all suggestions), and preventing an overly noisy Git history.\n   The [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/22439) involved 38 people, who posted 358 discussion notes. It landed in GitLab 13.2, and received seven 💚 emoji votes.\n\n## Get some help from the GitLab team\n\nIf you need any help while contributing to GitLab, below are some of the resources that are available.\n\n1. Ask questions on the [Contributors Gitter Channel](https://gitter.im/gitlabhq/contributors).\n1. 
Get in touch with [Merge Request Coaches](https://handbook.gitlab.com/job-families/expert/merge-request-coach/). To find a merge request coach, go to the GitLab Team Page and search for \"Merge Request Coach\".\n   You can also mention Merge Request Coaches by typing `@gitlab-org/coaches` in a comment.\n1. Find reviewers & maintainers of Gitlab projects in our [handbook](/handbook/engineering/projects/#gitlab) and [mention](https://docs.gitlab.com/ee/user/group/subgroups/#mentioning-subgroups) them in a comment.\n1. If you have feature ideas/questions, you can search for existing issues or create a new issue if there isn't one already. Feel free to [mention](https://docs.gitlab.com/ee/user/group/subgroups/#mentioning-subgroups) [product team members](/handbook/product/categories/) in the issue.\n\nWait for a reviewer. You’ll likely need to change some things once the reviewer has completed a code review for your merge request.\nYou may also need multiple reviews depending on the size of the change.\nIf you don't hear from anyone in a timely manner, feel free to find reviewers or reach out to Merge Request Coaches.\nPlease don't be shy about [mentioning](https://docs.gitlab.com/ee/user/project/issues/index.html)\nGitLab team members in your merge requests as all team members are expected to be responsive to fellow community members.\n\n## How we stay on top of community contributions\n\nIn Q3 of 2020, several GitLab teams are focusing on improving the experience for community contributors. 
To achieve this goal,\nwe created a few metrics around community contributions:\n\n* [Community Contribution Mean Time to Merge](/handbook/engineering/quality/performance-indicators/#community-contribution-mean-time-to-merge)\n* [Unique Community Contributors per Month](/handbook/engineering/quality/performance-indicators/#unique-community-contributors-per-month)\n* [Community MR Coaches per Month](/handbook/engineering/quality/performance-indicators/#community-mr-coaches-per-month)\n\nTo make sure the GitLab team is working hand in hand with the wider community in a timely fashion, we've already put a few automations in place:\n\n1. Every hour, wider community contributions are automatically [labelled \"Community contribution\"](/handbook/engineering/quality/triage-operations/#community-contributions).\n1. Every day, a report with the [untriaged](/handbook/engineering/quality/merge-request-triage/) community merge requests is created and assigned to the Merge Request Coaches for triage. This ensures each merge request has [stage and group](/handbook/product/categories/#hierarchy) labels set.\n1. Every two weeks, a report with unassigned and idle community contributions is created for each [group](/handbook/product/categories/#hierarchy).\n\nThese automations are powered by our [`triage-ops` project](https://gitlab.com/gitlab-org/quality/triage-ops/) and are documented in [Triage Operations](/handbook/engineering/quality/triage-operations/).\n\nI hope this post convinced you to start contributing to GitLab. 
Keep in mind, any contribution is valuable, and don't worry, we're here to support you.\n\nCover image: [\"Żuki leśne na liściu jesienią\"](https://unsplash.com/photos/5S2xIoNpcGk) by [Krzysztof Niewolny](https://unsplash.com/@epan5).\n{: .note}\n",[811,267,767,9,745],{"slug":5868,"featured":6,"template":680},"start-contributing-to-gitlab-today","content:en-us:blog:start-contributing-to-gitlab-today.yml","Start Contributing To Gitlab Today","en-us/blog/start-contributing-to-gitlab-today.yml","en-us/blog/start-contributing-to-gitlab-today",{"_path":5874,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5875,"content":5881,"config":5886,"_id":5888,"_type":14,"title":5889,"_source":16,"_file":5890,"_stem":5891,"_extension":19},"/en-us/blog/start-using-pages-quickly",{"title":5876,"description":5877,"ogTitle":5876,"ogDescription":5877,"noIndex":6,"ogImage":5878,"ogUrl":5879,"ogSiteName":667,"ogType":668,"canonicalUrls":5879,"schema":5880},"New: How to get up and running quickly using GitLab Pages templates","We're introducing bundled GitLab Pages templates, so let's take a look at how easy it really is now to get up and running with a new site.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679908/Blog/Hero%20Images/pages-templates-cover-image.jpg","https://about.gitlab.com/blog/start-using-pages-quickly","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New: How to get up and running quickly using GitLab Pages templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-02-20\",\n      }",{"title":5876,"description":5877,"authors":5882,"heroImage":5878,"date":5883,"body":5884,"category":743,"tags":5885},[2413],"2019-02-20","\n\nHello everyone, my name is Jason Yavorska and I'm the product manager for the [Release stage](/stages-devops-lifecycle/release/) here at GitLab, which includes GitLab Pages. 
In our [GitLab 11.8 release (March 2019) we're introducing](https://gitlab.com/gitlab-org/gitlab-ce/issues/47857) a quick way to select from our most popular [Pages templates](https://gitlab.com/pages?sort=stars_desc) directly from the new project setup screen. If you use GitLab.com, you can take advantage of this feature already! It looks a bit like this:\n\n![Pages Templates View](https://about.gitlab.com/images/blogimages/pages-templates-view.png){: .shadow.medium.center}\n\nNow, instead of having to fork an existing template, you can simply select one of the bundled ones and get going right away. If you're interested in one of the other templates, you can still create those in the old way – check out the [existing documentation on how to fork a template](https://docs.gitlab.com/ee/user/project/pages/index.html#fork-a-project-to-get-started-from).\n\nIn this article I'm going to show you just how effortless all of this can be. But first:\n\n## My experience contributing GitLab Pages templates\n\nFirst, though, I'd be remiss if I didn't mention that I contributed this change myself (with the help of a few key supporting players, of course.) Now, you may be wondering: I thought you were a product manager at GitLab? Not a developer? Well, that's absolutely true, but I am a hobbyist programmer on the side. I've contributed a small change here or there on my own time, but this was the largest, most complex thing that I've ever contributed myself.\n\nI always find in these situations that contributing is in some ways easier than you expect, and in some ways more challenging. Getting the code working was actually surprisingly straightforward: I was able to get our GDK ([GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/README.md)) up and running with minimal hassle, and then was able to iterate quickly until I found a working solution. 
Most of my challenges ended up being around getting the change through our review process and into the release. There's a lot you have to learn there, and I think it just takes some time and practice in order to have it all click. What was truly amazing, though, was all the friendly people who jumped in to help me along the way. I learned so much and am so proud of how everything came together in the end.\n\nIf you're considering making your first contribution, feel free to reach out to me on Twitter ([@j4yav](https://twitter.com/j4yav)) and I'll be happy to help guide you in the right direction. Contributing to open source is a great feeling, big or small, and if you haven't tried it before you should really give it a go.\n\n## Now let's set up a site!\n\nWith that out of the way, let's see this in action to appreciate just how painless it really is to set up a new site in GitLab pages now.\n\nThe video below walks through the steps, with full instructions underneath.\n\n Note that if you're using a private on-premise version of GitLab, be sure to check with your administrator to ensure that Pages is enabled. You may need to adjust some of the URLs in the setup below depending on your site configuration.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://youtube.com/embed/C2E1M-4Jvd0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### 1. Create the new project\n\nFor this example, we'll use the [Hugo](https://gohugo.io/) template, our most popular one. Simply go to the GitLab home page, and select \"New Project\" from the top right. Click on \"Create from template,\" click on the Hugo template, and then click on \"Use template.\" Give it a name like `namespace.gitlab.io`, where `namespace` is your `username` or `groupname`.\n\n### 2. 
Run your first pipeline\n\nWe need to make one quick edit, which will naturally kick off a pipeline and deploy our site for the first time. What we need to do is edit our `config.toml` to have the same URL that we set up in the project name. To do this we will go to Repository → Files, click on the `config.toml` file, and then click on \"Edit\" in the toolbar. All we need to do is change the `baseurl = \"https://pages.gitlab.io/hugo/\"` line to `baseurl = \"https://namespace.gitlab.io/\"` (again, replacing `namespace` with your `username` or `groupname`).\n\nCommit your changes, then head over to CI/CD → Pipelines and look for the new pipeline that's running. You can click on the status to see the build log, or just wait for it to finish – you might be surprised at how fast this is! Once the pipeline passes, we're good to go. It may take a minute or two for everything to work through replication, but once it does, you can see your new site at `https://namespace.gitlab.io/`, beautiful template included, just waiting for you to customize further.\n\n### 3. Where to go next\n\nThere's a lot of basic configuration for your site in the `config.toml`, check that out and see what you might like to modify. The about page is in `/content/page/about.md`, and you can see example posts for your blog in `/content/post` – feel free to delete these when you're done with them. Since these are written in [markdown](https://docs.gitlab.com/ee/user/markdown.html) they are a piece of cake to edit or add new ones. Getting started with Hugo is a bit out of scope for this post, but I assure you it's quite straightforward. You can check out the [Hugo getting started pages](https://gohugo.io/getting-started/) for more ideas on what you can do. Be sure also to check out [Hugo themes](https://gohugo.io/themes/) if you're looking for inspiration.\n\nHopefully this was helpful in getting you started. 
Good luck with your new site!\n\nCover image by [José Alejandro Cuffia](https://unsplash.com/@alecuffia) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[993,677,9,745],{"slug":5887,"featured":6,"template":680},"start-using-pages-quickly","content:en-us:blog:start-using-pages-quickly.yml","Start Using Pages Quickly","en-us/blog/start-using-pages-quickly.yml","en-us/blog/start-using-pages-quickly",{"_path":5893,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5894,"content":5900,"config":5906,"_id":5908,"_type":14,"title":5909,"_source":16,"_file":5910,"_stem":5911,"_extension":19},"/en-us/blog/starting-from-the-start-slippers-design-system",{"title":5895,"description":5896,"ogTitle":5895,"ogDescription":5896,"noIndex":6,"ogImage":5897,"ogUrl":5898,"ogSiteName":667,"ogType":668,"canonicalUrls":5898,"schema":5899},"Why design systems benefit everyone","Learn how the GitLab digital experience team built the Slippers design system for our marketing website.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679537/Blog/Hero%20Images/slippers-sys.jpg","https://about.gitlab.com/blog/starting-from-the-start-slippers-design-system","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why design systems benefit everyone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stephen McGuinness\"}],\n        \"datePublished\": \"2021-03-05\"\n      }",{"title":5895,"description":5896,"authors":5901,"heroImage":5897,"date":5903,"body":5904,"category":743,"tags":5905},[5902],"Stephen McGuinness","2021-03-05","\n\nThe [Digital Experience team](/handbook/marketing/digital-experience/) is new at GitLab, but we spent the past few months [creating Slippers, a new design system, which is a centralized location for design assets and code](https://gitlab.com/gitlab-com/marketing/digital-experience/slippers-ui). 
This blog post explains how we managed to build a design system in record time and accounts for how we overcame some of the challenges we encountered along the way.\n\nWe built Slippers because we needed a design system that we could rapidly iterate on and that would scale. We needed to use technologies that offered a single source of truth so our growing team could build on the repo. This process is not without its frustrations – what can work for one team might not work for the entire marketing department. In the past, discrepancies in design would happen because we didn't have a style guide.\n\nFortunately, creating a system that can respond to quick iterations can provide a solution to this complex problem. But \"simple\" in this case is misleading. We needed a new way of thinking and working. It is not enough to create a UI kit of consistent design assets for your designers to work with, doing this alone will fall at the first hurdle if it is not reflected in a coded repo. Designs will produce variations over time. Technical and design debt builds up due to small changes made over time and you end up where you started – with fragmented design and code.\n\nTime and effort as well as a vision are necessary to create a design system solution. This is the place our new team was at near the end of 2020. An already bizarre year for many, this was a great time to create a team to tackle this technical challenge head-on.\n\n## Why design systems are for everyone\n\nA common misconception of a design system is that it is for designers. You create a UI kit, hand it to developers, and you are off to the races. While a UI kit is important to the success of a system, it is just one part of what is a technical and efficient product.\n\nOur goal was to create a reusable library of assets, which included design assets (typestack, colors, spacing, grid, buttons, etc.) along with documentation on usage criteria. This is a big project that requires a lot of effort. 
First, we aligned around a common vision and product architecture. I want to emphasize \"product\" because this system acts as a product serving multiple teams across GitLab. Next, we rallied our team around a common goal and got to work. Our team established a set of guiding principles that would always act as our anchor for the project. [You can read more about them here](https://gitlab.com/gitlab-com/marketing/digital-experience/slippers-ui).\n\n*\"The more decisions you put off, and the longer you delay them, the more expensive they become.\"*\n\n**―[Craig Villamor](https://www.linkedin.com/in/craigvillamor), senior design director of Google Maps**\n\nWe found this quote from Craig in a [Medium post about the benefits of design systems](https://medium.com/agileactors/7-quotes-about-design-systems-that-will-inspire-you-9a89557fb26f). His remarks describe the dangers of putting off building a design system for too long. The fact is, the longer you design without a clear system and rubric, the more tech and design debt accumulates.\n\n## How we built the design system\n\nProducts exist to solve problems, so we articulated our vision with working sessions. The sessions were a platform for aligning our vision based on what we considered maintainable design and technology.\n\nOnce we aligned on our guiding principles we set about creating a roadmap. Our team decided how we wanted our product to be built, and agreed on tooling, tech stacks, and a cadence of delivery during our working sessions.\n\nWe decided on Figma for design since this was already being used within GitLab. Next, we created our core elements along with some [baseline components such as type, color, and spacing for the design system](https://www.figma.com/file/nWIOpmuMp7RZXmfTj6ujAF/Slippers_foundations?node-id=1292%3A573). We used existing pages as templates to refactor and give us a broader idea of what was and was not working. 
This process gave our developers time to investigate the best way to code our product and determine what shape it would take.\n\n## The value of a shared language\n\nOur engineering team started working on our tech stack and our designers started to work on what we called our \"foundations\". This can also be referred to as \"elements\". We did this in a way so we could stress-test our foundations package by refactoring existing pages with new styles that gave us an idea of the direction of our design system.\n\nNext, we applied these core elements to a select sample of pages to act as a proof of concept. We chose to edit the [homepage](https://about.gitlab.com/), [enterprise page](/enterprise/), [pricing page](/pricing/), and [entire GitLab Blog section](/blog/). We identified pain points and applied stop-gaps along the way. Since we are [results-driven](https://handbook.gitlab.com/handbook/values/#results), we used local CSS (Cascading Style Sheets) tightly coupled to the site itself. The perk of this approach is that you can deliver results quickly. After doing some UX and UI refinements on these pages, introducing new technology was easier because each of the pages is actively maintained. We used this time to learn and apply this practice to improve the system.\n\n## What's next\n\nThough the Digital Experience team has only been established for four months we've made huge inroads. We are starting to see how the Slippers design system will look once it is implemented across the entire organization.\n\nBuilding the Slippers design system is an example of a research and development (R&D) project. By laying out these foundations, we are set up for large-scale learning and success. The team is continuously gathering data for this R&D project and using it to better inform and refine our design system.\n\nAlso, since GitLab is open source, we are factoring open source values into our Slippers roadmap. 
We do this through posting our video updates to our partners and [public YouTube videos](https://www.youtube.com/c/GitLabUnfiltered/featured).\n\nThe reality is, this work takes time and investment. There is a herculean effort still left for us to bring the system fully to life. But already we have demonstrated the value of a design system to our leadership by delivering more than 2000 new CMS pages.\n\nEven at this very early stage the Slippers project has been rewarding and provides us with a continuous source of valuable insights. We're encouraged to push the boundaries and take calculated risks in what we learn and what we do.\n\nStay up-to-speed on our progress by checking out our [Slippers project](https://gitlab.com/gitlab-com/marketing/digital-experience/slippers-ui) and [watching our team videos on GitLab Unfiltered](https://www.youtube.com/c/GitLabUnfiltered/featured).\n\nCover photo by [Nihal Demirci](https://unsplash.com/@nihaldemirci?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/0ME-BIUBmUs)\n{: .note}\n",[811,9,5429,700],{"slug":5907,"featured":6,"template":680},"starting-from-the-start-slippers-design-system","content:en-us:blog:starting-from-the-start-slippers-design-system.yml","Starting From The Start Slippers Design System","en-us/blog/starting-from-the-start-slippers-design-system.yml","en-us/blog/starting-from-the-start-slippers-design-system",{"_path":5913,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5914,"content":5920,"config":5925,"_id":5927,"_type":14,"title":5928,"_source":16,"_file":5929,"_stem":5930,"_extension":19},"/en-us/blog/stealth-operations-the-evolution-of-gitlabs-red-team",{"title":5915,"description":5916,"ogTitle":5915,"ogDescription":5916,"noIndex":6,"ogImage":5917,"ogUrl":5918,"ogSiteName":667,"ogType":668,"canonicalUrls":5918,"schema":5919},"Stealth operations: The evolution of GitLab's Red Team","We discuss how GitLab's Red Team has matured over the 
years, evolving from opportunistic hacking to stealth adversary emulation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659561/Blog/Hero%20Images/securitycheck.png","https://about.gitlab.com/blog/stealth-operations-the-evolution-of-gitlabs-red-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Stealth operations: The evolution of GitLab's Red Team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Moberly\"}],\n        \"datePublished\": \"2023-11-20\",\n      }",{"title":5915,"description":5916,"authors":5921,"heroImage":5917,"date":5922,"body":5923,"category":720,"tags":5924},[3517],"2023-11-20","At GitLab, our Red Team conducts security exercises that emulate real-world threats. When the team was first formed, these exercises were opportunistic and done in plain sight. As the GitLab Security organization matured, so did our Red Team.\n\nWe now perform a majority of our operations in stealth, meaning that only a small group of team members are aware of the details.\n\nThis blog dives into the steps we took as we matured and lessons we learned along the way. We also share highlights of a recent stealth operation and the value it provided our organization.\n\nIf you're building an offensive security practice, or looking to mature an existing one, you may find some inspiration below.\n\n## Where we started\n\nOur Red Team was formed in July 2019 - about four years ago. We started off as three engineers and one manager spread across the U.S., Australia, and Europe.\n\nBack then, GitLab's security maturity was at an earlier stage. Some of the more advanced capabilities we have in place today were still being planned or improved.\n\nAs newly hired hackers, it was tempting to jump right into emulating advanced threat actors in top-secret operations. 
But we weren't just hackers - we were a Red Team with a mission to help make our organization more secure. It wasn't just about attacking all the things, it was about identifying and addressing realistic threats.\n\n### Getting to know GitLab\n\nBefore we started hacking, we did the following:\n- Wrote down [what we were doing](https://handbook.gitlab.com/handbook/security/threat-management/red-team/#what-the-red-team-does), [why we were doing it](https://handbook.gitlab.com/handbook/security/threat-management/red-team/), and [what rules we would stick to](https://handbook.gitlab.com/handbook/security/threat-management/red-team/red-team-roe/). This was critical to our success, especially as a team that worked asynchronously across time zones.\n- Met with our counterparts in Security Incident Response (SIRT) to understand how they could benefit from an offensive security practice.\n- Met with our counterparts in Engineering and IT to build relationships and help them understand our overall goals and approach.\n- Read. A lot. Documentation, runbooks, architecture diagrams. Whatever we could find to understand GitLab's environment and attack surface.\n\n### Getting to work\n\nFinally, it was time to hack.\n\nWe started out doing what we called \"open-scope\" work, which was similar to a penetration test but without the bureaucracy and boundaries of a typical time-based engagement. 
We wrote enumeration scripts, scanned publicly exposed cloud resources, and hunted for leaked secrets.\n\nWhen we found something that could be hacked, we hacked it and reported it in an [issue](https://docs.gitlab.com/ee/user/project/issues/) to prevent it from happening again.\n\nAs we noticed patterns emerging, we developed automation to more efficiently find and report them.\n\nThis was great - it reduced risk at GitLab and gave our team a chance to better understand our environment and its risks.\n\nBut it wasn't quite Red Teaming.\n\nWe were finding, exploiting, and reporting vulnerabilities, but we weren't providing GitLab with an opportunity to practice detecting and responding to real-life attackers.\n\n## How we planned to mature\n\nOver time, we found systemic solutions to more and more of the opportunistic findings. A new Vulnerability Management group was formed, taking ownership of our custom scanners and implementing more robust and permanent solutions. Visibility and control over endpoints increased as did the ability to monitor and alert across our entire organization.\n\nAs GitLab's defensive capabilities matured, it became important for the Red Team to do the same. We needed to emulate more advanced attackers and provide more realistic opportunities to detect and respond to these attacks.\n\nWe needed a plan.\n\nWe created a maturity model with unique stages showing where we started, where we were, and where we were headed. Each stage had a list of behaviors the team strived to demonstrate, or states we hoped to achieve.\n\nThis gave us a broad roadmap that we could work towards for the next two-to-three years. Looking back, it was worth the effort. 
We use the roadmap extensively, leveraging it to guide tricky decisions and to plan quarterly goals that moved us further on our journey.\n\nThe inspiration for our model came from many places, including:\n- The general-purpose [Capabilities Maturity Model](https://en.wikipedia.org/wiki/Capability_Maturity_Model)\n- The [Red Team maturity model](https://www.redteams.fyi) from Jordan Potti, Noah Potti, and Trevin Edgeworth\n- The [Red Team maturity model](https://www.redteammaturity.com/about) from Brent Harrell and Garet Stroup\n\nWe used a GitLab issue board to build the model.\nYou can [read about the logistics and benefits of using an issue board](https://handbook.gitlab.com/handbook/security/maturity-models/) in our handbook.\n\nThis is what our model looks like:\n![maturity-model](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679157/Blog/Content%20Images/maturity-model.png)\n\n## Key milestones along the way\n\nWhen we first wrote our maturity model, we were sitting somewhere in the second column. Moving beyond that would require a big shift - from opportunistically finding and exploiting vulnerabilities to emulating adversaries and providing opportunities for detection and response.\n\nFor us, that path started with Purple Teaming and then moved on to stealth operations.\n\nWe used GitLab epics to make high-level plans for each of these stages. Epics allow you to group individual issues, breaking down long-term projects into actionable tasks.\n\n### Implementing Purple Teaming\n\nPurple Teaming was a pathway to stealth operations. It would give us an opportunity to build and practice our processes transparently and in collaboration with our Blue Team.\n\nWe made a plan to develop these processes and to test them out by conducting a small-scale Purple Team operation. 
This was done in the context of an OKR (Objectives and Key Results), and took us about three months to complete.\n\nHere is the description from the epic we opened to get started:\n\n> **OKR: Purple Team Foundations & Initial Run**\n>\n> Our SIRT team continues to grow and implement more robust detection and response capabilities. Recently, they have begun to adopt the MITRE ATT&CK framework for classifying attack techniques.\n>\n> These strategies are highly aligned with our own, and build an excellent framework for a more collaborative approach in planning, designing, and executing attack emulations. When both teams are involved in all stages of a campaign, we are more likely to produce an outcome that is actionable and beneficial to the organization.\n>\n> This OKR will allow us to focus on ensuring all of the foundational/logistical pieces are there, and then to execute a smaller controlled operation to make sure we got it right.\n\nAt a high-level, the OKR contained the following tasks:\n- Meet with various teams at GitLab to discuss what we were trying to accomplish, how we would work together across timezones, what rules we should put in place, etc.\n- Plan for specific changes/additions to our handbook to capture the results of those discussions.\n- Collaborate across teams to plan and execute a small operation using these new processes.\n\nWhen the quarter was complete, we had the following to show for it:\n- [Purple Teaming at GitLab](https://handbook.gitlab.com/handbook/security/threat-management/red-team/purple-teaming/): A handbook page describing our methodology\n- [Red Team issue templates](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/red-team-issue-templates): Public, re-usable templates for planning, executing, and reporting on operations\n- \"[How we run Red Team operations remotely](https://about.gitlab.com/blog/how-we-run-red-team-operations-remotely/)\": A blog talking about the how and why we do this work 
asynchronously across time zones\n\nWe then used those processes and issue templates to plan and execute a small Purple Team operation. The brainstorming stage allowed us to work with our friends in SIRT, identifying recurring security themes and selecting attack techniques that would allow them to improve their detection and response capabilities.\n\nWe replicated a token leak where an attacker leveraged legitimate credentials to establish persistence and move laterally within the GitLab.com environment. This provided an opportunity to test existing security information and event management (SIEM) alerts, validate the ability to locate all malicious activity in log files, and to implement earlier detection and prevention capabilities.\n\nWe made changes to our Purple Teaming processes based on lessons learned. In following quarters, we moved on to full-scale emulation of relevant adversaries using a Purple Team process that was developed and tested in collaboration with groups across our organization.\n\n### Implementing stealth operations\n\nShifting to stealth was a natural evolution from Purple Teaming. We continued to work from our maturity model, operating from the plan that was already established and communicated across the organization.\n\nJust as we did with Purple Teaming, we created an epic to shift to stealth operations by default and aligned it with our quarterly OKR.\n\nThis epic was opened with the following description:\n\n> **OKR:  Improve the maturity of the Red Team by shifting to stealth operations by default**\n>\n> As part of our general team roadmap, we are focusing on maturing the Red Team's processes and procedures this year. 
This quarter, we will complete various tasks allowing us to shift to a \"stealth by default\" way of performing operations.\n>\n> This will provide the organization a better opportunity to practice detecting and responding to the most relevant and realistic threats.\n>\n> We will do this by:\n>\n> - Refreshing the Red Team Rules of Engagement by collaborating with SIRT and agreeing on processes and procedures.\n> - Researching, documenting, and automating architecture requirements for stealth operations.\n\nWe ended up breaking those two bullet points into separate child epics, as there was a lot of work to do in each.\n\n![child-epics](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679157/Blog/Content%20Images/child-epics.png)\n\nThe first child epic, around processes, resulted in output that is mostly public. Some examples are:\n- A short summary of [Stealth Operations](https://handbook.gitlab.com/handbook/security/threat-management/red-team/#stealth-operations) in our general handbook page\n- A new [Stealth Operations](https://handbook.gitlab.com/handbook/security/threat-management/red-team/red-team-roe/#stealth-operations) section in our rules of engagement\n- Example [Stealth Operation Techniques](https://handbook.gitlab.com/handbook/security/threat-management/red-team/red-team-roe/#stealth-operation-techniques) section, also in the rules of engagement\n- Iterations to our [Red Team issue templates](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/red-team-issue-templates)\n\nWe were very transparent with all of these changes. Each change was a merge request, which was visible to everyone at GitLab. We opened a dedicated issue to discuss any concerns and used an all-company Slack channel to invite everyone to provide feedback.\n\nAs an internal Red Team, building relationships across the organization is key to our success. 
We find that transparency about how we operate helps us maintain this trust.\n\nThe second child epic, around technical research, resulted in outputs that are mostly not public and involved things like:\n- Using \"Attacker VMs\" with Parallels on our corporate laptops. This provides us a space without security monitoring, where we can use commercial VPNs to appear as separate entities when emulating remote attackers.\n- Working with our IT department to acquire our own AWS accounts with exceptions to standard security monitoring. This gives us a space to install our C2 infrastructure, phishing sites, etc.\n- Testing various command and control (C2) frameworks, agents, and redirectors. Designing automation to deploy these environments from scratch with each new operation.\n- Establishing private communication channels and a wiki for Red Team engineers and trusted participants.\n- Testing encrypted secret management tools for temporary storage during operations.\n\n## Results from a recent stealth operation\n\nWith our new tools and processes in place, stealth operations became our default.\n\nOne recent operation began with selecting an attack group that had been in the news for targeting organizations similar to ours. This operation spanned three months - the majority of which was spent on researching the adversary and developing capabilities to emulate them.\n\nWe started with a volunteer from a non-security team at GitLab. They were one of our \"trusted participants\", meaning they were briefed on the operation. We had them visit a website we created which mimicked the download page of a popular open-source desktop utility. They downloaded the utility and followed the on-screen instructions to install and authorize it.\n\nThe application was a modified fork of the legitimate tool, created just for this operation. It contained an embedded script which downloaded our command and control (C2) agent and provided the Red Team access to the laptop. 
This scenario mirrored the adversary we were emulating, who would deploy malware to engineers' laptops.\n\nUsing an insider to launch the initial payload is a common Red Team technique called an \"assumed breach.\" This allows the Red Team to focus their efforts on emulating post-exploitation activities, where there is more value in practicing detection and response.\n\nWith remote access achieved, the Red Team conducted various attack techniques locally on the laptop to steal web browser cookies and impersonate their active sessions.\nFrom there, we pursued further objectives similar to those of our emulated adversary.\n\nThese techniques triggered an alert from our SIEM system. This created an incident with our SIRT team, who immediately took action to contain and investigate the incident.\n\nSelect members of security leadership were included as trusted participants in the operation. We were all closely monitoring the investigation from a Slack room set up for this purpose. This allowed the SIRT engineers to experience responding to a very realistic attack while preventing the incident from escalating too far.\n\nAt some point during the investigation, it was revealed that the attacker was in fact the Red Team. SIRT had performed a thorough investigation, collaborating across the team to trace the attack back to our initial access vector.\n\nThis operation helped us validate some existing detection capabilities, recommend improvements for more, and give the team a chance to work together to solve an interesting challenge in a safe and controlled environment. 
This type of experience only comes from conducting attack operations in stealth, which is exactly why we have an internal Red Team at GitLab.\n\n## What we learned\n\nAt GitLab, we believe that performing Red Team operations in stealth provides the most realistic opportunity to practice detecting and responding to real-life attacks.\n\nWe also realize that every organization is different, and your security evolution may follow a different path.\n\nWe learned that having a plan defined early on and shared transparently across the organization was key to success.\nHere are the things that helped us the most:\n- Defining a maturity model and using it as a roadmap.\n- Committing to broad goals defined in GitLab epics, and breaking them down into manageable tasks inside GitLab issues.\n- Thoroughly documenting processes in our handbook and in GitLab issue templates.\n\nWe would love to hear your thoughts on Red Teaming and how you've managed your own security evolution. If there are any specific topics you'd like our team to write about in the future, please let us know. 
Feel free to comment below or to open issues or merge requests in any of [our public projects](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public).",[720,9],{"slug":5926,"featured":91,"template":680},"stealth-operations-the-evolution-of-gitlabs-red-team","content:en-us:blog:stealth-operations-the-evolution-of-gitlabs-red-team.yml","Stealth Operations The Evolution Of Gitlabs Red Team","en-us/blog/stealth-operations-the-evolution-of-gitlabs-red-team.yml","en-us/blog/stealth-operations-the-evolution-of-gitlabs-red-team",{"_path":5932,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5933,"content":5939,"config":5943,"_id":5945,"_type":14,"title":5946,"_source":16,"_file":5947,"_stem":5948,"_extension":19},"/en-us/blog/stem-gems-give-girls-role-models",{"title":5934,"description":5935,"ogTitle":5934,"ogDescription":5935,"noIndex":6,"ogImage":5936,"ogUrl":5937,"ogSiteName":667,"ogType":668,"canonicalUrls":5937,"schema":5938},"GitLab + STEM Gems: Giving girls role models in tech","Meet the GitLab team-members working to inspire the next generation to pursue careers in STEM.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672357/Blog/Hero%20Images/stem-gems.png","https://about.gitlab.com/blog/stem-gems-give-girls-role-models","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab + STEM Gems: Giving girls role models in tech\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stephanie Garza\"}],\n        \"datePublished\": \"2018-10-08\",\n      }",{"title":5934,"description":5935,"authors":5940,"heroImage":5936,"date":1976,"body":5941,"category":808,"tags":5942},[4333],"\n\nGitLab recently partnered with [STEM Gems](http://stemgemsbook.com/), an organization creating awareness of successful women in STEM, to inspire girls and give them STEM role models. 
**STEM** (Science, Technology, Engineering, and Mathematics) pervades every aspect of our lives; everything can be tied to technology in some way, shape, or form. Given the constant expansion of technology, career prospects are endless. One would think STEM is the number one pursued career path right?\n\nSurprisingly, according to the US Department of Commerce, in 2017 only 24 percent of women worked in STEM. Another harsh reality is that women who hold STEM degrees are less likely than their male counterparts to pursue a STEM career. In fact, women are more likely to work in education or healthcare.\n\nDriven by the low numbers, STEM Education advocate Stephanie Espy strived to make a change. Espy created STEM Gems, an organization that began as a book filled with inspiring women in STEM. The book was the stepping stone for a greater initiative to create awareness for the successful female powerhouses in STEM, as well as provide girls with role models to look up to.\n\nGirls who have STEM role models are more likely to pursue opportunities outside their traditional realm, and STEM Gems is making it possible for girls to connect with them. Role models, mentors, and career ambassadors inspire and empower girls to achieve their dreams.\n\nAt [our recent summit in South Africa](/blog/gitlab-summit-cape-town-recap/), forty GitLab team-members came together for an epic power hour of delving into each other's professional pathways and identifying challenges. Participants were paired up and asked to interview each other about their individual careers, goals, and accomplishments. This included the significant others of GitLab team-members and men interested in learning more about making GitLab inclusive. Through this event, we were able to strengthen our relationships and identify ways to foster a culture of inclusion. The event also provided greater visibility into the challenges and barriers women in STEM face.\n\nGitLab is building a community where everyone can thrive. 
We've gathered together the stories and photos of the GitLab team-members that participated in the event. In this post, and in a follow-up post, we will share each of these amazing stories. We want to inspire and encourage girls to set Big Goals and pursue every dream and remember you’ll always have a friend at GitLab!\n\n![Jenny and Molly](https://about.gitlab.com/images/blogimages/stem-gems/jenny.jpg){: .shadow.small.left.wrap-text}\n\n**Name:** [Jenny Nguyen](/company/team/#lankhanh28) (right)\n\n**Role:** Payroll and Payments Lead\n\n**Why is what you do important?**\nI handle payroll and expense reimbursement, making sure all our team members get paid and reimbursed on time.\n\n**What is something you are really proud of?**\n\nI helped save a previous company $2 million by applying technical logic to processes.\n\n**Did you know you wanted to work in tech when you were growing up? If not, what did you THINK you wanted to be?**\n\nNo, I started my undergrad with Business major and took programming as an elective class. My teacher encouraged me to change my major to Computer Science and Software Engineering, but I didn't have an opportunity to be in a technical position. However, I have applied my technical knowledge and aptitude from school to reduce manual processes within my functions for the past 10 years.\n\n**If you could give advice to a girl thinking about a career in tech, what would it be?**\n\nAs a non-technical person, I want them to know that they don’t have to have a career in technology to have and utilize their own technical skills. Every function needs input from technical and non-technical perspectives.\n\n----\n\n![Ramya](https://about.gitlab.com/images/blogimages/stem-gems/ramya-authappan.png){: .shadow.small.right.wrap-text}\n\n**Name:** [Ramya Authappan](/company/team/#atramya)\n\n**Title:** Senior Test Automation Engineer\n\n**Why is what you do important?**\n\nAt GitLab, I automate tests as much as possible. 
I design and develop test frameworks. Test automation is the key to Continuous Integration and Delivery, which in turn is essential in minimizing the 'Time to Market' of any new features, thereby achieving customer satisfaction.\n\n**What is something you are really proud of?**\n\nApart from my work at GitLab, I'm also the Director of [Women Who Code](https://www.womenwhocode.com/), Chennai chapter. As part of Women Who Code, I get to meet a lot of female leaders in the technical space. I was recently invited to be a Panelist in a discussion on digital safety help by Google and SheThePeople.tv. I was also [interviewed by a Indian National News channel](https://www.thenewsminute.com/article/women-tech-freshworks-ramya-authappan-importance-mother-friendly-workplaces-78893). I frequently share my knowledge as a conference/meetup speaker. On the whole, I love doing what I do and being who I am!\n\n**Did you know you wanted to work in tech when you were growing up? If not, what did you THINK you wanted to be?**\n\nYes! In my school days I had to choose a specialization at the age of 16 years. I chose Computer Science, and I think I made the right choice. I find that I'm interested in software engineering and always wanted to be a software engineer.\n\n**If you could give advice to a girl thinking about a career in tech, what would it be?**\n\n1. Choose wisely when it comes to specializations.\n2. Keep learning.\n3. Give back to society.\n4. Change the world! The sky is the limit!\n\n----\n\n![Hannah](https://about.gitlab.com/images/blogimages/stem-gems/hannah-schuler.png){: .shadow.left.small.wrap-text}\n\n**Name:** [Hannah Schuler](/company/team/#hannahschuler8)\n\n**Title:** SDR Team Lead – West and APAC\n\n**Why is what you do important?**\n\nI train other SDR team members to identify and create qualified opportunities. I also assist in recruiting team members and also work closely with online marketing managers for targeted ad campaigns. 
The SDR role is an evangelist role – we get the opportunity to be the first point of contact for people. It's an exciting and challenging role because most often people have never heard of GitLab. Sharing news about a solution that can help people and bring value is exciting.\n\nMy role is important because I facilitate and add structure to the team. I help remove roadblocks and enable us to work more efficiently. I help team members reach their full potential.\n\n**What is something you are really proud of?**\n\nI received a discretionary bonus a few months ago for going above and beyond in my role! Being promoted from an SDR representative to a team lead in nine months was really awesome, I'm very proud of that. I'm a certified SCRUM master and product owner. I am also certified in SAFE (Agile methodology).\n\n**Did you know you wanted to work in tech when you were growing up? If not, what did you THINK you wanted to be?**\n\nIt's evolved over time – when I was little I wanted to be a ballerina. I was super shy, an introvert, and dancing was my way to express myself. When I grew older, everything changed and I become super outgoing. I wanted to make an impact in the world and got a degree in International Business Studies because I wanted to work for the UN. My excitement for technology came a lot later in my career. My friend shared excitement about the industry and that's what initially got my foot in the door. I did not have a traditional background in tech.\n\n**If you could give advice to a girl thinking about a career in tech, what would it be?**\n\nYou will have an impact in this field. Companies are looking for you. You will develop lifelong skills and have an impact in this field. Women are trailblazers in this industry. 
You can dictate your own earning potential and will have the opportunity to mentor other women as well.\n\n----\n\n![Cristine](https://about.gitlab.com/images/blogimages/stem-gems/cristine-marquardt.png){: .shadow.small.right.wrap-text}\n\n**Name:** [Cristine Marquardt](/company/team/#csotomango)\n\n**Title:** Billing Specialist\n\n**Why is what you do important?**\n\nI process invoices for sales-assisted orders, troubleshoot support tickets (mostly related to money and licensing issues), provide sales support, and I wear a lot of hats. Everyone in the company plays an important role to keep GitLab running. When you work at a startup, you have to be game for all the obstacles that are thrown your way. I never imagined how much I would learn and how much I could contribute in my role.\n\n**What is something you are really proud of?**\n\nI'm currently dabbling in .Net framework and I made my first semi-functional calculator. While this sounds like a rather simple task, this is huge to me since my career has been focused in the finance and accounting world.\n\n**Did you know you wanted to work in tech when you were growing up? If not, what did you THINK you wanted to be?**\n\nI knew that I wanted to work in tech ever since I was a kid. I was fortunate enough to go to a school that had computers in each classroom and there was also a computer lab. I wanted to get into computer engineering when I was in middle/high school, but I never pursued it in college. I'm now pushing myself to learn software development.\n\n**If you could give advice to a girl thinking about a career in tech, what would it be?**\n\nBelieve in yourself and don't be afraid. 
The only one holding you back is yourself.\n\n----\n\n![Gabriela and Diana](https://about.gitlab.com/images/blogimages/stem-gems/gabriela.jpg){: .shadow.small.right.wrap-text}\n\n**Name:** Gabriela Mena Breña (right)\n\n**Title:** Chemical Engineer (Not at GitLab, I am the significant other of a GitLab team-member)\n\n**Why is what you do important?**\n\nPractical transition from fossil fuels to renewable energy solutions. This will save the planet!\n\n**What is something you are really proud of?**\n\nI led the team that created fiscal terms for the first private investments in Mexican oil and gas resources. This protected the Mexican government's financial stability. We secured $3.1 billion worth of contracts to construct gas pipelines for the Mexican state. I am also proud to have received a full scholarship from the Mexican government to study for a Master's degree in Energy Science.\n\n**Did you know you wanted to work in tech when you were growing up? If not, what did you THINK you wanted to be?**\n\nYes, I found science and math the most challenging, which made them the most interesting to me.\n\n**If you could give advice to a girl thinking about a career in tech, what would it be?**\n\nDon't let anybody else tell you what you can be. Be true to who you really are and focus on your own goals and desires.\n\n----\n\n![Chloe](https://about.gitlab.com/images/blogimages/stem-gems/chloe-whitestone.jpg){: .shadow.small.left.wrap-text}\n\n**Name:** [Chloe Whitestone](/company/team/#drachanya)\n\n**Title:** Talent Operations Specialist\n\n**Why is what you do important?**\n\nI am part of the recruiting team. I do all of the backend operations for recruiting, such as vendor management, reporting, researching on different tools, and employee branding. In addition, I am also the recruiter for a few roles (customer success, UX designer, data engineer). 
GitLab cannot be what it is without having great talent and I get to be a part of this exciting journey.\n\n**What is something you are really proud of?**\n\nI've played a critical role in the multiple transitions of GitLab's ATS (application tracking system) which has improved candidate experience, increased efficiency, and given greater visibility for hiring managers to hire the best talent possible. Before I was at GitLab, there weren't any tools for recruiting metrics. Through my efforts, GitLab has recruiting metrics and is now able to analyze how they are doing compared to other industry leaders. This has allowed us to improve the hiring process and enabled applicants to get job offers faster than before.\n\n**Chloe also:**\n\n- Migrated Workable to Lever\n- Migrated Lever to Greenhouse\n- Implemented background checks at GitLab\n- Trained GitLab team-members for Greenhouse\n- Created a vacancy process for GitLab\n- Improved onboarding process and experience\n- Became an assistant manager in six months during her first fulltime job\n- Is proud of every hire she has made\n\n**Did you know you wanted to work in tech when you were growing up? If not, what did you THINK you wanted to be?**\n\nGrowing up, I didn't think I would work in Tech. I originally wanted to be president! I was exposed to tech through my high school STEM program. That equipped me to be where I am today.\n\n**If you could give advice to a girl thinking about a career in tech, what would it be?**\n\nStart right away by learning and getting involved in the community. It's harder to start the older you get (IMO). Don't be afraid, no matter how much experience you have or how old you are. 
You are not alone!\n\n----\n\n![Katherine](https://about.gitlab.com/images/blogimages/stem-gems/katherine-okpara.jpg){: .shadow.small.right.wrap-text}\n\n**Name:** [Katherine Okpara](/company/team/#katokpara)\n\n**Title:** Junior UX Researcher\n\n**Why is what you do important?**\n\nI work with product management and UX designers to understand users' pain points, goals, and needs. My job is to understand where we can improve the product by speaking directly to users. The user experience of a product impacts the customer directly. Positive experiences equal stronger relationships (more feedback) for the product to improve.\n\n**What is something you are really proud of?**\n\nI've received mentorship during my eight months here at GitLab and am now leading studies. I've been able to learn about different features and different aspects of the product at a fast pace. I have helped to build healthy relationships between end users and teams for better product improvements/advancements.\n\n**Did you know you wanted to work in tech when you were growing up? If not, what did you THINK you wanted to be?**\n\nNo. I didn't know anything about tech/computers, etc. until college. I took a few programming/data science classes in college and that's when my interest was piqued. I was on more of an academic path at school (psychology). In my last year of college I took a web design class (applying research to products) and felt that I had found my niche. I have been working on those skills ever since through online courses, research, etc.\n\n**If you could give advice to a girl thinking about a career in tech, what would it be?**\n\nThere is a place for you! Whether it's programming or another area, there are still many paths for consideration. If you come from a non-traditional path, there is always a way to link your skills to your desired role. 
Believe that you can do it, even if you don't currently have the skills (you can build those skills!).\n\n----\n\n![Lucas](https://about.gitlab.com/images/blogimages/stem-gems/lucas.jpg){: .shadow.small.left.wrap-text}\n\n**Name:** Lucas Charles\n\n**Title:** Individual Contributor to Gitlab (significant other to a GitLab team-member)\n\n**Why is what you do important?**\n\nI am an end-user, and GitLab wouldn't be a product without users. It's built on open source technology, which requires everyone to contribute. As a user and contributor, it is powerful to have everything in one place and GitLab is fun to use. It's easier to go to work every day with software you love.\n\nMy significant other works at GitLab, but I would use it every day regardless. I love the product and company. I think GitLab is doing something important and changing the way we build software.\n\n**What is something you are really proud of?**\n\nWhen my significant other was looking for a new job, I realized that GitLab would be a perfect fit for her and encouraged her to apply. I wanted to do everything I could to help her because I care and it's an amazing opportunity to push herself and contribute to a greater tech community full of diverse people, product, and cultures.\n\nI'm incredibly proud of my significant other. She works on GitLab every day, making the world a more interesting place through technology. I'm quite proud to be part of that network. I'm also proud to be one of the first 1,000 contributors to Gitlab. I'm proud that GitLab chose to recognize that by sending me a special sticker!\n\n**Did you know you wanted to work in tech when you were growing up? If not, what did you THINK you wanted to be?**\n\nI've always been a tinkerer and like to take things apart and put them together. Tech enables me to do that quickly and easily. It is an amazing industry that creates something out of nothing but an idea, and has limitless possibilities. 
We move fast and many truly believe they are changing the world.\n\n**If you could give advice to a girl thinking about a career in tech, what would it be?**\n\nFirst, to just do it, because it's an incredible field and we need more diversity. Diversity is important: we need a range of ideas, perspectives, and to create more opportunities to understand each other. We should build products that work for everyone and address all needs. Challenging ourselves and growing ourselves through different perspectives is critical for both personal growth and a healthy culture.\n",[810,267,811,9,832],{"slug":5944,"featured":6,"template":680},"stem-gems-give-girls-role-models","content:en-us:blog:stem-gems-give-girls-role-models.yml","Stem Gems Give Girls Role Models","en-us/blog/stem-gems-give-girls-role-models.yml","en-us/blog/stem-gems-give-girls-role-models",{"_path":5950,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5951,"content":5956,"config":5961,"_id":5963,"_type":14,"title":5964,"_source":16,"_file":5965,"_stem":5966,"_extension":19},"/en-us/blog/support-engineering-at-gitlab",{"title":5952,"description":5953,"ogTitle":5952,"ogDescription":5953,"noIndex":6,"ogImage":4387,"ogUrl":5954,"ogSiteName":667,"ogType":668,"canonicalUrls":5954,"schema":5955},"At your service: Support Engineering at GitLab","A new series from GitLab Support Engineering about what we do and how we do it. All remotely of course!","https://about.gitlab.com/blog/support-engineering-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"At your service: Support Engineering at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Matos\"}],\n        \"datePublished\": \"2017-12-01\",\n      }",{"title":5952,"description":5953,"authors":5957,"heroImage":4387,"date":5958,"body":5959,"category":743,"tags":5960},[4683],"2017-12-01","\n\nHi! 
I’m [Lee Matos](/company/team/#leematos), [Support Team](/handbook/support/) Lead at GitLab and I’m very excited to be kicking off our blog series about what Support Engineering means at GitLab. One of the biggest things that people start with is, \"What’s the difference between Support Engineering and Customer Service?\" Great question! Let’s talk about it.\n\n\u003C!-- more -->\n\n## Support Engineering vs. Customer Service?\n\nTo start, I think Customer Service is a subset of Support Engineering. To be a great support engineer, you should be customer focused, but also technically minded. We address customers' needs via web calls and email daily. So those interactions are where a customer focus is paramount, but we’ll often be debugging Redis Queues or finding slow SQL queries. This is not just relationship management. It’s sussing out the bugs and then squashing them. I think that’s pretty common for support engineering, but we have some unique quirks too.\n\n## What’s unique about Support Engineering at GitLab?\n\nAt GitLab, transparency is a core value. Because of that, our issue trackers are public. This is great for Support because in traditional support models, we act as a router between the company and the customer. What I mean to say by that is that Support is responsible for keeping the customer in the loop as to the status and state of a bug fix or such by holding the ticket open until it gets resolved.\n\nWith our transparency, we get to act more like a pipe fitter. We connect the customer to the public issue, and from there they can see when it’s scheduled (and even when it gets delivered and by whom!) and if they choose, they can engage directly with the team responsible. This is unprecedented access into product development. It also allows Support to be smart about making the connection, but to give the ownership to the actual team responsible for delivering the work. 
Speaking of which, let’s talk about the Support Team right now.\n\n## How big is the team?\n\nWe are currently 12 global hooligans and we are looking for more. We are finding our volume of requests are best served by people based in EMEA -> East Coast America so we are targeting those regions to hire. This is great because everyone gets to work a “9-5,” but by leveraging remote work, we can easily get 24/5 coverage. This is huge.\n\nIf you are reading this and finding yourself interested in learning more, [we are hiring](/jobs/). We’d love to have you join our team if this sounds right for you.\n\nI’ll be writing more over the next months about how we stay connected remotely, how we communicate across teams, and how to make successful remote internships amongst other things. I hope you’ll enjoy the journey!\n\n-Lee\n",[9],{"slug":5962,"featured":6,"template":680},"support-engineering-at-gitlab","content:en-us:blog:support-engineering-at-gitlab.yml","Support Engineering At Gitlab","en-us/blog/support-engineering-at-gitlab.yml","en-us/blog/support-engineering-at-gitlab",{"_path":5968,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5969,"content":5975,"config":5981,"_id":5983,"_type":14,"title":5984,"_source":16,"_file":5985,"_stem":5986,"_extension":19},"/en-us/blog/switching-sides-in-security",{"title":5970,"description":5971,"ogTitle":5970,"ogDescription":5971,"noIndex":6,"ogImage":5972,"ogUrl":5973,"ogSiteName":667,"ogType":668,"canonicalUrls":5973,"schema":5974},"Switching “sides” in security","How does product security work differ from pen testing and hacking all the things?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679594/Blog/Hero%20Images/jason-polychronopulos-unsplash.jpg","https://about.gitlab.com/blog/switching-sides-in-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Switching “sides” in security\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Joern Schneeweisz\"}],\n        \"datePublished\": \"2020-10-23\",\n      }",{"title":5970,"description":5971,"authors":5976,"heroImage":5972,"date":5978,"body":5979,"category":698,"tags":5980},[5977],"Joern Schneeweisz","2020-10-23","\n \n{::options parse_block_html=\"true\" /}\n \n\n \n \n \n\nThe beginning of this month marked my first year working at GitLab. Before joining the GitLab team, I'd been doing security consulting and penetration testing for my entire career. I didn’t change jobs much until last year ... actually I haven't at all. I'd been happily hacking all the things over at [Recurity Labs](https://recurity-labs.com) since 2007.\n\nI would like to use my first anniversary here at GitLab to compare both sides, namely penetration testing and security consulting versus the product security side of security. Nowadays, I’m working on the [Security Research team](/handbook/security/#security-research) here at GitLab. A lot of my work is closely interwoven with the [Application Security](/topics/devsecops/) team: reviewing features and merge requests, and responding to pings asking for security advice. It appears a bit like in-house security consulting, but in reality, the work is much broader in general and I’ll outline the main differences here in this post.\n\n## Distractions\n\nI was a bit baffled when I was asked, ‘How do you keep state? How do you take notes about your projects?’ in the very first run of the Source Code Audit Training I delivered as a security consultant to some in-house security team. About a decade into the job at that point, I'd never thought about the massive distractions one might have being part of a product security team. It was a simple question: the team was wondering about my note keeping techniques. At this point I didn't have any good answer. I didn't have an external process to keep track of my projects. Why? 
Because I had the luxury of executing one project at a time; only one thing to hack, only one thing to focus on deeply for a week or two. I could just rely on my memory because I barely needed to context-switch. When the project was over, I dumped my findings into a report and was ready to move on to the next project. \n \nIn my current role, I’ve since adapted to the huge amount of context switching one needs to do in the day-to-day work. Though, I still need to find the perfect note taking solution for myself (if you have any cool pointers, just leave a comment with this post). And generally, having a greater variety of tasks and obligations during a week of work is something refreshing, at least for me. It allows me to switch topics in the event I’m stuck on something. Later on, I can switch back with a fresh mindset ready to tackle the problem, possibly with a new perspective.\n\n## Thinking broad vs. deep\n\nI was used to thinking very deeply when performing code reviews. And, during a pentest, you can dig really, really deep into the application you're assessing (please stay in scope though ;D). \n \nHowever, in product security you are delivered the output of that deep thought process. Often the job of the in-house application security engineers is to communicate security impact and consequences to engineering and product management teams; effectively switching from thinking deep to thinking broad. \n \nWhen I was writing assessment reports on the consulting side, I expected a certain, rather high level of security expertise on the receiving end. Now, on the product security side, the information shared has to be communicated to development and product management counterparts in a readily understandable manner. Suddenly, things need to be taken into consideration, which an external security consultant (luckily :sweat_smile: ) doesn't have to think about. This might be, for instance, product decisions or other non-technical aspects. 
This intersection of product security engineers and external pentesters is where friction can emerge. One side might disrespect or poke fun at the other side, due simply to the lack of some context or information the counterpart has. That being said: the \"other\" side typically isn't \"ignorant\" or less skilled, they just have another level of focus (deeper or broader, perhaps) and, most importantly, different priorities. \n \nBeing able to take-on the perspective of someone else is a great skill to have in almost any situation in life. That’s just a general take away. This being said, though, I’m not accusing any pentester of not possessing this skill -- it’s merely that they’re not expected to have this in the context of a pentest. Rather, it’s the deep level of technical abilities they’re hired for.  For me, the change was quite beneficial. The variety of tech stacks is lower here at GitLab; for instance, I don’t think I’ll see too much PHP or Java code to audit, but the broadened view beyond the horizon of technical questions was a trade worth making for me.\n\n## We're in the same boat\n\nBe it a security consultant doing a code review or an in-house application security engineer triaging and validating bug bounty submissions: they're on the same side. Ultimately, everyone wants to improve the security posture of whatever they're in charge of. For a pentester this \"thing they’re in charge of\" changes with every project they take. For in-house application security teams it's roughly the same product the whole time. While the goal is common, it is the work and the environment that can differ a lot. I personally am happy to have made the step to \"the other side\", working in product security now. 
It has given me the opportunity to approach security issues from new and, at least for me, unusual angles.\n\nPhoto by [Jason Polychronopulos](https://unsplash.com/@jpoly) from [Unsplash](https://www.unsplash.com).\n{: .note}\n",[720,1578,9],{"slug":5982,"featured":6,"template":680},"switching-sides-in-security","content:en-us:blog:switching-sides-in-security.yml","Switching Sides In Security","en-us/blog/switching-sides-in-security.yml","en-us/blog/switching-sides-in-security",{"_path":5988,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5989,"content":5995,"config":6000,"_id":6002,"_type":14,"title":6003,"_source":16,"_file":6004,"_stem":6005,"_extension":19},"/en-us/blog/tech-debt",{"title":5990,"description":5991,"ogTitle":5990,"ogDescription":5991,"noIndex":6,"ogImage":5992,"ogUrl":5993,"ogSiteName":667,"ogType":668,"canonicalUrls":5993,"schema":5994},"How to use DevOps to pay off your technical debt","Technical debt is a universal problem with an equally universal solution – DevOps. Here's how DevOps can reduce the tech debt burden and help you deploy faster and more frequently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681643/Blog/Hero%20Images/greenery.jpg","https://about.gitlab.com/blog/tech-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use DevOps to pay off your technical debt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-10-05\",\n      }",{"title":5990,"description":5991,"authors":5996,"heroImage":5992,"date":5997,"body":5998,"category":787,"tags":5999},[672],"2020-10-05","\n\nOne of the primary resource constraints in the [DevOps](/topics/devops/) world is technical debt. 
Technical debt is a metaphor created by Ward Cunningham that compares the build-up of cruft (deficiencies in the internal quality of software systems) to the accumulation of financial debt, where the effort it takes to add new features is the interest paid on the debt, writes [Martin Fowler](https://martinfowler.com/bliki/TechnicalDebt.html).\n\nIt’s common for a busy developer to write code with known imperfections, but because the priority is to ship new features as quickly as possible, deliverables are often prioritized over correcting the inefficiencies in the process.\n\nOne of the major dilemmas with determining the value of spending precious time fixing cruft versus building new features is that the costs are not objectively measurable, says Fowler. Just like with paying off financial debt, the right call is largely circumstantial as opposed to absolute.\n\n\"Given this, usually the best route is to do what we usually do with financial debts, pay the principal off gradually,\" writes Fowler.\n\nBy cleaning up some of the cruft as you work on the new features, you ensure that the most relevant code is tidier for future iterations. When it comes to crufty, but stable, code, you can leave it alone. 
This method is similar to paying the monthly balance on a low interest rate loan – the impact is minimal.\n\n \"In contrast, areas of high activity need a zero-tolerance attitude to cruft, because the interest payments are cripplingly high,\" writes Fowler.\n\nOne way to start dealing with technical debt is to conduct a rough audit and triage your technical debt by \"interest rate\" – high interest rate cruft is addressed with the same priority as shipping new features, while medium-to-low interest rate cruft can be dealt with in a ratio that best suits your team’s situation, because dealing with your most urgent technical debt sooner rather than later will help you save resources in the long-term.\n\n## How tech debt accumulates in your workflow\n\nIt’s not just code that contains cruft. A lot of the time, we have cruft that slows down our engineering processes. When it comes to investing time and money into updating DevOps processes, it seems there is never enough of either resource.\n\n\"We don’t let our teams spend time on improving their process because we think it’s wasted effort,\" says [Brendan O’Leary](/company/team/#brendan), senior developer evangelist at GitLab. \"But if you can spend a day fixing some things that make your workflow inefficient, and you save an hour a week from now until eternity, that’s a big difference.\"\n\nTake for instance manual deployment versus the use of automated pipelines. We know that deploying manually takes an enormous amount of time, but the upfront cost of allocating time to building automated pipelines can seem daunting.\n\nIf your team is trapped in a time-consuming cycle of technical debt, take a peek at how Minnesota-based consulting firm, [BI Worldwide](/customers/bi_worldwide/) (BIW), was able to accelerate deployments by transitioning to GitLab. 
In the case study, the BIW Corporate Products Development Team explains how they were stuck in a rut of manual testing and manual deployments on their on-prem infrastructure. Their toolchains were complex and inefficient, which created a dense backlog.\n\n\"It was entirely time-consuming to apply all of those code changes,\" said Adam Dehnel, product architect, BIW, in the case study. As a result, deployments were infrequent and slow as too many features were crammed into each release.\n\nThe first step to increase the speed of their deployments was to update and modernize their processes.\n\n\"[BIW] had practices and tools in place at the time but were spending time on items that weren’t business differentiating features. They faced classic issues surrounding a lack of cross-team communication including inefficient mechanisms for intra-organization workflows and individualized toolsets.\"\n\nFirst, BIW made the painful transition from CVS to Git. Next, the company aimed to automate the build, test, and deployment process and built a toolchain with tools such as GitHub, Jenkins, JIRA, and Confluence.\n\nFor BIW, this complex toolchain was buggy. One thing that was not mentioned in this specific use case, but still merits recognition, is the hidden cost of maintaining all of these different tools.\n\n\"The argument to be made there is not only is it cost of using these various tools, but also that the more tools you have, there is the overhead cost of upgrading them, maintaining them, and integrating them,\" says Brendan. 
\"There’s a massive hidden cost behind the cost of doing business.\"\n\nIn the next iteration, BIW embraced the efficiency of an all-in-one tool by transitioning to GitLab.\n\nBIW went from a pre-Git pace of shipping a release every nine to 12 months to deploying nearly ten times a day using GitLab Ultimate, no doubt putting a serious dent in the technical debt that followed their slower, laborious release cycle.\n\n## Conserve valuable resources and pay off technical debt with DevOps\n\nIn a previous blog post, we examined [communication strategies to get non-technical stakeholders to buy-in to DevOps](/blog/devops-stakeholder-buyin/). DevOps can help you deploy faster and more frequently, giving your business an edge over the competition, but it is also a strategy for paying off your technical debt. By first taking into account inefficiencies in your code and engineering processes, you can make a rough triage of your team's technical debt. This type of audit is the first step to identifying cruft you can trim to help speed up your cycle time, clear your backlog, and modernize your engineering processes.\n\n## Read more\n\n- [Need DevOps buy-in? Here's how to convince stakeholders](/blog/devops-stakeholder-buyin/)\n- [A guide to cloud native storage for beginners](/blog/cloud-native-storage-beginners/)\n- [Want to iterate faster? 
Choose boring solutions](/blog/boring-solutions-faster-iteration/)\n\nCover Photo by [Vadim L](https://unsplash.com/@sk3tch?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/plants?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[9,831,1440,109],{"slug":6001,"featured":6,"template":680},"tech-debt","content:en-us:blog:tech-debt.yml","Tech Debt","en-us/blog/tech-debt.yml","en-us/blog/tech-debt",{"_path":6007,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6008,"content":6013,"config":6018,"_id":6020,"_type":14,"title":6021,"_source":16,"_file":6022,"_stem":6023,"_extension":19},"/en-us/blog/telstra-invests-in-gitlab",{"title":6009,"description":6010,"ogTitle":6009,"ogDescription":6010,"noIndex":6,"ogImage":2991,"ogUrl":6011,"ogSiteName":667,"ogType":668,"canonicalUrls":6011,"schema":6012},"Telstra Ventures invests in GitLab to boost innovation and collaboration","We’re excited to announce that Telstra Ventures has invested in GitLab!","https://about.gitlab.com/blog/telstra-invests-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Telstra Ventures invests in GitLab to boost innovation and collaboration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-04-16\",\n      }",{"title":6009,"description":6010,"authors":6014,"heroImage":2991,"date":6015,"body":6016,"category":299,"tags":6017},[950],"2018-04-16","\n\nTelstra Ventures, the investment arm of Australia’s leading telecommunications and technology company, has chosen to invest in GitLab for our open core DevOps philosophy that supports the entire development and operations lifecycle.\n\n“Customers are increasingly demanding better digital experiences, and DevOps is becoming the leading way for companies to develop, deliver, and support applications that drive great customer 
experiences,” said Mark Sherman, Managing Director at Telstra Ventures. “One of the reasons we decided to invest is because GitLab is committed to continuously improving its application, which is key to helping companies rapidly take their best ideas from development to market.”\n\n>“One of the reasons we decided to invest is because GitLab is committed to continuously improving its application, which is key to helping companies rapidly take their best ideas from development to market.”\n\nWe believe that a collaborative environment is necessary to take your best ideas to market. We know from our [2018 Global Developer Report](/developer-survey/previous/2018/) that a collaborative environment is important to you (94 percent of respondents said so!), but that visibility and transparency has some catching up to do. In addition, 55 percent of respondents are still using at least five tools for their development processes and 62 percent of respondents acknowledged losing time due to context switching between tools on a typical work day. This isn’t a good use of anyone’s time, which is why it’s our mission to deliver a single application that meets everyone’s needs.\n\nOur focus on a collaborative approach gives development, quality assurance, security, and operations teams the ability to concurrently work on the same project within a single application and to see the entire workflow from their own point of view. The same information’s all there – just presented in a way that’s relevant to each team. We published a blog post last year with more details of [our DevOps vision](/blog/devops-strategy/). This investment from Telstra is affirmation that we’re on the right track and will help accelerate our progress towards realizing this vision.\n\n“We look forward to partnering with Telstra to support its large application team and to aid the company in its vision of connecting people through technology,” said [Sid Sijbrandij](/company/team/#sytses), our CEO and co-founder. 
“DevOps is increasingly being adopted by organizations around the globe to radically improve productivity and the pace at which software moves from idea to market.”\n\nAs the only single software application that supports the entire DevOps lifecycle, GitLab is built from the ground up to enable collaboration amongst teams adopting the methodology. We’re happy that Telstra believes in our vision and our capability to enable software development teams to achieve faster DevOps lifecycles.\n",[9,675],{"slug":6019,"featured":6,"template":680},"telstra-invests-in-gitlab","content:en-us:blog:telstra-invests-in-gitlab.yml","Telstra Invests In Gitlab","en-us/blog/telstra-invests-in-gitlab.yml","en-us/blog/telstra-invests-in-gitlab",{"_path":6025,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6026,"content":6032,"config":6037,"_id":6039,"_type":14,"title":6040,"_source":16,"_file":6041,"_stem":6042,"_extension":19},"/en-us/blog/ten-devops-terms",{"title":6027,"description":6028,"ogTitle":6027,"ogDescription":6028,"noIndex":6,"ogImage":6029,"ogUrl":6030,"ogSiteName":667,"ogType":668,"canonicalUrls":6030,"schema":6031},"DevOps terminology: 10 terms that might surprise you","From Yoda to yaks and even baklava, here are 10 DevOps terms we’re betting you’ve never heard of.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681526/Blog/Hero%20Images/devopsterms.jpg","https://about.gitlab.com/blog/ten-devops-terms","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps terminology: 10 terms that might surprise you\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-08-25\",\n      }",{"title":6027,"description":6028,"authors":6033,"heroImage":6029,"date":6034,"body":6035,"category":787,"tags":6036},[869],"2020-08-25","\n\nYou call yourself a [DevOps professional](/topics/devops/build-a-devops-team/) but do you 
know the definitions of yak shaving, Yoda conditions or baklava code?\n\nWe didn’t think so.\n\n## Benefits of DevOps\n\nDevOps outpaces the old software development methodologies like waterfall simply because it’s more efficient. Here are eight obvious DevOps wins:\n\n* Deployment is faster\n\n* Product quality is better\n\n* Automation simplifies the whole process\n\n* There’s flexible, continuous delivery\n\n* Scalability is even easier to achieve\n\n* Teams are transparent and communicative\n\n* There are faster fixes for bugs and other problems\n\n* It gives space to constantly iterate\n\nRegardless of your role on a business or a technical side, there are DevOps benefits for everyone.\n\n## DevOps terms and team communication\n\nA basic understanding of DevOps terms is important when it comes to optimal team communication. Otherwise, there are a lot of blank, blinking faces in the crowd. But even more important than simply understanding the terminology is consciously practicing good communication about DevOps and iterating on your team’s communication style.\n\nNew ideas, tools, and processes are constantly cropping up in the DevOps space, which means there is new terminology to learn. Great team communication involves continuously helping each other keep up with new knowledge and ensuring an environment of continuous learning.\n\n## DevOps terms glossary\n\nHere’s a look at our [DevOps](/topics/devops/) glossary with a focus on 10 DevOps terms even seasoned pros might not have encountered. And if you think there are some obscure ones we missed, please tell us about it [here](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/8878). We are working on a comprehensive GitLab guide to DevOps terms.\n\n### Devops term 1: Baklava code\n\n[Baklava](https://en.wikipedia.org/wiki/Baklava) is a dessert made up of many layers of thin phyllo dough – which is notoriously difficult to work with. 
Baklava code is the same: Lots of thin layers of code which makes it too fragile to stand up to real world use.\n\n### DevOps term 2: Dark launch\n\nA dark launch usually refers to a partial or incomplete release of a feature or features without any announcement. This under-the-radar release is a way to gather performance and testing data without the pressure of public input, because the features haven’t actually been talked about.\n\n### DevOps term 3: Dead code\n\nCode is considered \"dead\" if it lives in a program but actually doesn’t do anything and/or contribute to results or performance. Generally [dead code should be removed](https://refactoring.guru/smells/dead-code) as it’s a potential waste of space and computational power.\n\n### DevOps term 4: Everything-as-code\n\nEverything-as-code takes [infrastructure-as-code](https://searchitoperations.techtarget.com/definition/Infrastructure-as-Code-IAC) and goes one step further: Literally everything is treated as code including the infrastructure, virtual machines, and deployment configuration, to name a few. Everything-as-code is made possible by cloud native, proponents of it say it boosts traceability, repeatability, and testing. \n\n### DevOps term 5: Fear-driven development\n\nForget [FOMO](https://www.urbandictionary.com/define.php?term=Fomo), fear-driven development is what happens when project managers raise the stakes by moving up deadlines or laying people off. \n\n### DevOps term 6: NoOps\n\nIt’s DevOps without the \"Ops\" or what could happen if automation eliminates traditional ops tasks. Some see NoOps as the highest evolution of a successful DevOps practice while others don’t see it that way at all. 
NoOps joins a slew of other Ops-related terms including [GitOps](https://thenewstack.io/what-is-gitops-and-why-it-might-be-the-next-big-thing-for-devops/), [CIOps](https://dzone.com/articles/kubernetes-anti-patterns-lets-do-gitops-not-ciops), and more.\n\n### DevOps term 7: Rubberducking\n\nThis novel way of debugging code was made famous in the book [The Pragmatic Programmer](https://www.amazon.com/Pragmatic-Programmer-journey-mastery-Anniversary/dp/0135957052/ref=sr_1_1?dchild=1&keywords=the+pragmatic+programmer&qid=1598365813&sr=8-1). A programmer carries around a rubber duck and discovers that by explaining the code to the duck, line by line, the errors made themselves obvious. Translated for the real world, and practiced at GitLab, it means talking through your code with another developer which helps make flaws or logical errors more obvious.\n\n### DevOps term 8: Spaghetti code\n\nIf someone tells you your code is like spaghetti don’t take it as a compliment. Spaghetti code is all over the map, often with too many [GOTO statements](https://www.geeksforgeeks.org/goto-statement-in-c-cpp/). It’s poorly organized and often lacks any kind of traditional structure. \n\n### DevOps term 9: Yak shaving\n\nDuring a global pandemic when many are working from home, it’s safe to assume yak shaving is happening frequently, and it’s definitely a term that is used [outside of programming](https://americanexpress.io/yak-shaving/). In general, it means doing something that leads to something else but has nothing to do with the original goal. Programmers use it to refer to interminable tasks that must be done before a project can move forward, as in, \"I’ll get to that once I’ve shaved the yak.\"\n\n### DevOps term 10: Yoda conditions\n\n*Code you I will Luke Skywalker.* Yoda conditions refers to non-traditionally written code, i.e., code written as [Yoda](https://starwars.fandom.com/wiki/Yoda) speaks. 
Once you put yourself in the mindset it’s possible to understand what you’re looking at, but, just like Luke Skywalker experienced, it can take a while to get the hang of this.\n\n_Some of these are terms in use at GitLab, but in our research we stumbled across [the Coding Horror blog](https://blog.codinghorror.com/new-programming-jargon/) created by Jeff Atwood and we found a few new-to-us terms including Yoda conditions. Jeff refers to his list as the \"top 30 Stack Overflow new programming jargon entries.\"_\n\n## Growth of a DevOps culture\n\nA DevOps culture doesn’t grow simply because an organization decides to implement it. It takes daily, focused effort and cultivation. Some things organizations can do to foster the growth of a DevOps culture are to keep leadership in the loop, openly communicate across the team, and create a roadmap of shared goals and individual responsibilities to help achieve them. Understanding the lingo helps too!\n\nCover image by [Raphael Schaller](https://unsplash.com/@raphaelphotoch) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1440,9,723],{"slug":6038,"featured":6,"template":680},"ten-devops-terms","content:en-us:blog:ten-devops-terms.yml","Ten Devops Terms","en-us/blog/ten-devops-terms.yml","en-us/blog/ten-devops-terms",{"_path":6044,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6045,"content":6051,"config":6056,"_id":6058,"_type":14,"title":6059,"_source":16,"_file":6060,"_stem":6061,"_extension":19},"/en-us/blog/the-case-for-all-remote-companies",{"title":6046,"description":6047,"ogTitle":6046,"ogDescription":6047,"noIndex":6,"ogImage":6048,"ogUrl":6049,"ogSiteName":667,"ogType":668,"canonicalUrls":6049,"schema":6050},"The case for all-remote companies","Remote teams offer flexibility, reduce company costs, and increase productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668018/Blog/Hero%20Images/allremote.jpg","https://about.gitlab.com/blog/the-case-for-all-remote-companies","\n          
              {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The case for all-remote companies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-10-18\",\n      }",{"title":6046,"description":6047,"authors":6052,"heroImage":6048,"date":6053,"body":6054,"category":808,"tags":6055},[930],"2018-10-18","\nI’m writing this post while I sit under a mango tree and listen to\n[Tchaikovsky’s Piano Concerto No. 1](https://www.youtube.com/watch?v=xZYYqUssAVw).\nI don’t have to worry about my music bothering anyone around me, and I don’t\nthink twice about my attire (yoga pants, Beatles shirt, [Time-Turner](https://www.harrypottershop.com/products/time-turner-trade-by-noble-collection),\nand a pair of boots I'm trying to break in). I get to work wherever I’m most productive, because GitLab is an\n[all-remote company](/blog/the-remote-manifesto/). My 350 team members and\nI work wherever we’re most comfortable – whether that’s in a small cafe in\nUtrecht, Netherlands or in a bookstore in Santa Monica, California. We’re\npassionate about working remotely and believe that it has the power to\n[change the workforce](/company/culture/all-remote/#how-remote-work-is-changing-the-workforce).\n\n## Why all-remote works\n\nAll-remote organizations [empower team members](/company/culture/all-remote/benefits/) to work in settings that allow\nthem to balance their personal and professional lives. A completely remote\nenvironment allows organizations to retain team members as they move to be closer to parents,\ntravel the world, or follow their significant other if they have a job transfer. People don’t have to choose\nbetween their happiness and their career.\n\n> \"Remote working offers flexibility in every part of people’s lives. 
If you need\nto suddenly take care of your family or friends, the flexibility to travel to\nthem, move to them, be there when they need you there. And I think that's a\nreally beautiful thing.\" — Sid Sijbrandij\n\n1. **Equivalence**: The problem with hybrid setups, in which there are a few\nremote workers who collaborate with a larger on-site team, is that the remote\nteam feels isolated and often misses out on discussions. When there’s no HQ, no\none is in a satellite office and everyone's on equal footing, so no one is left\nout of impromptu meetings over lunch or quick brainstorming sessions down the hall.\n\n1. **Communication**: When everyone is remote,\n[effective communication](/handbook/communication/#introduction) becomes a\nnecessity, which helps instill good, scalable working practices. At GitLab, we\ndocument best practices in our [handbook](/handbook/) and we work in\n[issues](https://docs.gitlab.com/ee/user/project/issues/), allowing us to work\nasynchronously, which we need since we’re a global company with team members in\nevery time zone. Working in issues means our discussions are written, so we don't\nendure long meetings, which run the risk of team members forgetting information\nor decisions.\n\n1. **Hiring**: All-remote companies have an advantage over traditional work\nenvironments, because they can hire people irrespective of location, so they’re\nable to find the most talented people in the world rather than within a commutable\ndistance.\n\n1. **Cost-effective**: When you can hire around the world, you can pay market\nwages and offer people an at-market or above-market wage while still reducing\ncosts for the company. Furthermore, without office rent, an organization saves\na significant amount of money. GitLab, for example, has experienced\n[rapid growth](/jobs/) and would've had to move offices seven times in the last\nfew years. 
We save a significant amount of money on rent, utilities, office\nequipment, and additional team members to manage the office.\n\n## Overcoming the challenges\n\nThe biggest disadvantage to remote working is that isolation can set in if there\nisn't a concerted effort to create a social connection between people. In a\nco-located company, people can mingle in break rooms, sit together at lunch, and\nbriefly chat in hallways. At all-remote companies, the social fiber of a [culture\nhas to be actively cultivated](/company/culture/all-remote/building-culture/) and time must be set aside for it or team members\nwill feel alone in their work and disconnected from the organization.\n\nGitLab has [Group Conversations](/handbook/group-conversations/)\nevery day at the time when West Coast and Europe overlap. The most-wanted hours\nin the company to organize meetings are dedicated to talking about different\nareas of the company and learning how they're performing. We also do a\n[Company Call](/handbook/communication/#company-call) every day, which\ncomprises about five minutes of announcements and 25 minutes of people chatting.\n\nOur [Coffee Break Calls](/company/culture/all-remote/tips/#coffee-chats) encourage\nteam members to spend several hours a week socializing and building a relationship\nthat's separate from work. Since working remotely can also lead to team members\nnever meeting in person, we have a [visiting grant](/handbook/incentives/#visiting-grant)\nto cover transportation costs, and every nine months, the entire team gets\ntogether for the [GitLab Summit](/events/gitlab-contribute/).\n\nWhen I worked in-office, there was a stigma to wanting to chat with people,\nbecause my manager would wonder why I wasn't working. Now, my manager praises my\nability to connect with people. Our coffee chats give us permission to talk to\nteam members about anything.\n\n> \"Instead of it being a stigma,\nwe support it. 
We force you to do it when you onboard by asking you to set up\nfive coffee breaks with team members. It's totally legitimized, and everyone thinks it's acceptable. And, one thing I\nlike a lot is that it's personal. People tell stories, and sometimes they're fun,\nsometimes they're beautiful, sometimes they're really sad, and I love them all.\" -- Sid Sijbrandij\n\n## The investor perspective\n\nWe'll admit that investors have expressed concern about our dreamy all-remote\natmosphere. In considering GitLab, investors usually have these three concerns:\nwe don't match their pattern, whether the executive team has enough interaction,\nand the 50 percent loss in value in case of an acquisition. Investors are interested in\npattern-matching, and since the majority of their companies are traditional\nin-office organizations, investors are reluctant to deviate from what has\nhistorically worked well.\n\n![Sid responds to a Hacker News comment, writing that all-remote companies are the future and that one day, in-office companies will have to discuss why they are not remote](https://about.gitlab.com/images/blogimages/sidhn.png){: .shadow}\n  *\u003Csmall>Sid replies to a [Hacker News comment](https://news.ycombinator.com/item?id=18158896) about all-remote companies.\u003C/small>*\n\nWhen it comes to the executive team, investors wonder whether GitLab's leadership\nis able to effectively work together when they're distributed. Leadership needs\nhigh-bandwidth communication since they represent different functions, and in\nthe eyes of investors, remote cultures are not conducive to this level of interaction.\nOur executive team has quarterly in-person meetings and regular video calls.\n\nThe concerns about acquisition are true, but they help both investors and GitLab\ndetermine whether their goals are aligned. When a company gets acquired,\nespecially in the Bay Area, the presumption is that all the employees move to\nthe acquiring company. 
This would be hard in our case – people don't have work\nvisas, others are used to a remote lifestyle, and a lot of people just wouldn't\nwant to move. The industry estimate is that an all-remote team halves the value\nof a company in the case of an acquisition. Although this may sound terrifying\nto some, this fact helps us select the investors that believe in our goal: to\nbecome a [public company](/company/strategy/#sequence). So, if investors are interested\nin acquisition, investing with GitLab isn't the right move for them, because our\ngoals are misaligned.\n\n## Interested in changing the workforce?\n\nAn increasing number of the workforce wants to be a part of a remote team. One\nstudy found that\n[\"searches for flexible work arrangements is up 32 percent year over year,\"](https://www.hiringlab.org/2017/07/27/flexible-work-arrangements-searches-up/)\nan indication that the appeal of remote working is on the mind of jobseekers.\n\nIf you’re considering creating an all-remote environment, please borrow heavily\nfrom our 1,500-page [handbook](/handbook/)! 
We discuss which [tools](/handbook/tools-and-tips/)\nwe use, our [expense policy](/handbook/spending-company-money/), and our\n[onboarding template](https://gitlab.com/gitlab-com/people-ops/employment/blob/master/.gitlab/issue_templates/onboarding.md).\nIf you think of ways we can improve our remote working culture, we’d love it if\nyou [contributed](/company/strategy/#why) your thoughts!\n",[9,832],{"slug":6057,"featured":6,"template":680},"the-case-for-all-remote-companies","content:en-us:blog:the-case-for-all-remote-companies.yml","The Case For All Remote Companies","en-us/blog/the-case-for-all-remote-companies.yml","en-us/blog/the-case-for-all-remote-companies",{"_path":6063,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6064,"content":6070,"config":6075,"_id":6077,"_type":14,"title":6078,"_source":16,"_file":6079,"_stem":6080,"_extension":19},"/en-us/blog/the-cloud-native-all-remote-security-challenge",{"title":6065,"description":6066,"ogTitle":6065,"ogDescription":6066,"noIndex":6,"ogImage":6067,"ogUrl":6068,"ogSiteName":667,"ogType":668,"canonicalUrls":6068,"schema":6069},"The cloud-native, all-remote security challenge","What are the challenges and rewards of working in security at a cloud-native, all-remote company like GitLab?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670171/Blog/Hero%20Images/akshay-nanavati-Zq6HerrBPEs-unsplash.jpg","https://about.gitlab.com/blog/the-cloud-native-all-remote-security-challenge","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The cloud-native, all-remote security challenge\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2019-09-13\",\n      }",{"title":6065,"description":6066,"authors":6071,"heroImage":6067,"date":6072,"body":6073,"category":698,"tags":6074},[1010],"2019-09-13","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\nWe sat down with 
GitLab security engineer Jayson Salazar to talk about the challenges of working in security ops in a cloud-native, all-remote company like GitLab and the security myth he thinks should be debunked.\n\n---\n\n\n**Name:** Jayson Salazar\n\n**Title:** Security engineer, [Security Operations](/handbook/security/#security-operations)\n\n**How long have you been at GitLab?** I joined GitLab in January 2019\n\n**GitLab handle:** [@jdsalaro](https://gitlab.com/jdsalaro)\n{: #tanuki-orange}\n\n**Connect with Jayson:** [LinkedIn](https://www.linkedin.com/in/jdsalaro/) / [Twitter](https://twitter.com/jdsalaro)\n\n![GitLab security engineer Jayson Salazar](https://about.gitlab.com/images/blogimages/jayson_salazar.jpg){: .shadow.medium.center}\n\n#### Tell us what you do here at GitLab:\nI work as a security engineer on our Security Operations team. We work around the clock providing technical and procedural feedback, improving our security capabilities, interfacing amongst diverse stakeholders and responding to incidents to keep GitLab — the company, its employees and all its products — secure.\n\n#### What’s the most challenging or rewarding aspect of your role?\nI believe that one cannot understand that which cannot be easily defined and located and furthermore, that one cannot secure that which isn’t understood. In short, visibility is everything, at both small and large scales and, in my opinion, every security engineer ought to have a picture of the environment that they are trying to protect that is as accurate and detailed as possible. \n\nTherefore, upon joining GitLab, I immediately tried to build a full-fledged mental map that bundled together the technologies, systems, ancillary artifacts and people with knowledge of them that GitLab leverages in daily operations. What I thought would  be an easy, and rather uneventful task proved to be much harder to accomplish than expected as the days, weeks and months progressed. 
\n\nConsidering how diverse GitLab’s technological stack is and how many moving parts it has given that we’re all-remote, multi-cloud, SaaS, open-source and 800 employees strong; building such a mental scheme in one sitting was definitely overly ambitious. As time has progressed, however, I've come to terms with the idea that my understanding of GitLab as a whole; including technical aspects, as well as our values and culture, would continue to improve and cement itself and that it wasn’t a trivial task I could assign a deadline to or rush along. As of today, I’m very comfortable working with and reasoning through the different moving parts that make up GitLab, and getting to this point has been both very rewarding but also quite challenging. \n\n\n#### And, what are the top 2-3 initiatives you’re currently focused on?\nOn the engineering side of my role, I’m focusing on architecting and implementing tools that improve our detection capabilities as a whole by allowing us to ingest, aggregate and build analysis and alerting pipelines around diverse and very interesting data sources. I’ve always been in love with data, hoarding it, slicing it, visualizing it and drilling down into it.  By doing this we, the Security Operations team, create powerful tools that our teams rely on to spot, track and address security issues faster. \n\nOn the less glamorous front, I am quite passionate about (as everyone on our Security Operations team is) improving our processes, documentation and providing feedback on technical issues that I care deeply about. Therefore, you’ll often find me raising issues related to the security of our different products, or their components, as well as dealing with accrued technical debt, contributing to our [Handbook](https://about.gitlab.com/handbook/security/) or creating both technical and procedural documentation that other GitLab employees can rely on. \n\n#### How did you get into security? \nAs a teenager, The Matrix was my favorite movie. 
The idea that rules and systems all around us existed for us to circumvent them really fascinated me. I gravitated towards “coding” because I wanted to recreate the weird unintelligible green terminal output shown on the screens of the Nebuchadnezzar. While in high-school some brief VBA and Excel explorations led me to Flash and Python. Before I knew it, I was spending my weekends during my freshman year in University frustrated but engaged playing wargames such as [Over The Wire](https://overthewire.org/wargames)/ and [Smash the Stack](http://smashthestack.org/). It was during that time that I started seriously considering a career in information security. Although I went on to explore other areas both professionally and academically, such as software development and data analysis, which to this day I still quite enjoy, I was always drawn back to security.\n\n#### What is the most significant piece of security advice you could provide to a colleague or friend? \nQuestion yourself and your abilities, always within reason and, as long as you can deal with the emotional pressure. You can, and will be, wrong. When that happens, having countermeasures in place that you put there because you assumed your judgement could have been wrong is going to help you and your team greatly. \n\nAs with any industry, professionals working in cybersecurity can become rusty and comfortable with their day-to-day work. One incident comes after the other, every design decision becomes the same, using TLS, salt and hash, using a proper authentication and authorization scheme, buzzwords here buzzwords there, magical-security box from provider X or Y will save us, and on and on. All of the sudden, best practices become dogmas, rules of thumb turn into mental barriers, generous budgets devolve into excuses for lack of architectural work and the cybersecurity professional has, single-handedly, killed his ability to do meaningful, impactful, truly interesting and creative work. 
That’s a big one in my opinion. Another is being careful with burnout, practice self-care and don’t become cynical. You’re in cybersecurity because you care, you don’t need to be a rockstar to contribute, and yes, what you do matters.\n\n#### From the perspective of your role, what’s GitLab doing better than anyone else in terms of security?\nAs an organization, we’ve quickly realized that, for security issues originating in artifacts that can be tracked and managed as code, it’s best to start looking for security issues early in the development process, before they materialize and carry real consequences, and not wait until the whole thing has been shipped. \n\nGitLab’s [Secure Team](https://about.gitlab.com/handbook/engineering/development/sec/secure/) is working on creating and improving features that help teams mitigate security-related problems in their codebases before they occur and can be discovered via traditional means. In my opinion, this is a very interesting and powerful mindset-shift, we’re going from “number of bugs discovered” to “number of bugs prevented”.\n\n#### What do you look forward to the most in security in the next 5 years?\nTo be honest, I’m not very thrilled about our collective future when it comes to cybersecurity. I believe some people greatly underestimate the complexity we’re facing while trying to secure the systems we’re building nowadays and this will become even more apparent in the next few years. It’s as if many companies are attempting to re-build their figurative planes mid-flight and that has the potential to backfire badly and affect customers and us all as a society; as it already has often in the past few years. 
However, I’m becoming increasingly optimistic as we’re seeing companies build out or empower their security teams to become more involved in the design and implementation phases of their infrastructure and, if applicable, their deliverables.\n\n#### Is there an area of security research you think deserves more attention? Why?\nSecurity analytics, and everything related to security analytics. Securing the internet for everyone little by little requires situational awareness, one of the best ways to get that is via data, lots of data. Said data will have to be gathered, stored, analysed and the related insights need to be shared. Privacy concerns aside, of which there are plenty, I’d like to see governments and public institutions gathering data about the number of systems they’ve updated in the last year, month or day, their patch levels, stacks they rely on, vulnerabilities they have fixed and much more. Imagine being able to rate the cybersecurity posture of a country as BB+ or AAA and aligning a nation’s (and by proxy its economy’s) cybersecurity efforts with financial success? Granted, this is just a random shower-thought I’ve had for a while but I think more research into “large scale security analytics and governance” could be an interesting endeavor. \n\n#### What mainstream or industry propagated security myth would you like to be better understood?\nThat all companies should migrate to the “cloud”, or leverage IaaS or PaaS providers to operate, because having your crown jewels “up there” is intrinsically more secure. Of course, I’m not advocating for sticking to routines of the old days where spinning up servers meant having metal boxes on-premise. After all, I do work at GitLab and believe in the way we have adopted agility and in the many merits of DevOps. 
However, it’s crucial to acknowledge that the skills and mindsets required to properly secure traditional computing environments are, in many cases, radically different to those needed to operate secure cloud environments. Therefore, I think companies, especially small- and medium-sized companies without the budget to call-in experts once problems arise, should carefully plan the terms on which they want to migrate on-premise systems to the common IaaS providers or data centers with similar offerings. Ultimately, I’d like to see companies putting more emphasis in training their workforce properly before setting migration processes in motion that could potentially increase their existing technical and security debt.\n\n## Now, for the questions you *really* want to have answered:\n\n\n#### What’s your favorite season? \nWinter, hands-down. Cold weather, clear skies, the anticipation of Christmas season, snow, meeting friends for coffee and fireplaces, what’s there not to like?\n\n#### What is that one food, you cannot live without?\nKorean cuisine, especially Bulgogi. If the world ever ends, let it be with me eating Bulgogi as the sun sets.\n\n#### When you’re not working, what do you enjoy doing/how do you spend your free time?\nI quite enjoy discussing politics and social developments, listening to electronic music and watching and discussing deep, and not-so-deep, movies. Blade Runner, V for Vendetta, Matrix, Ghost in the Shell, The Girl with the Dragon Tattoo, and 50 First Dates are all favorites of mine. \n\nOn the creative side of things, I really enjoy writing poems. The way they touch people and how they interpret them in ways I could have never anticipated. It’s also a hobby that has become more and more enjoyable the more I share it with others, both in person and online.\n\n#### Have a favorite quote?\nI have many favorite quotes, but not really one I can call a core tenet of my personal philosophy or that drives inspiration. 
There is, however, a poem by William Ernst Henley that I often share, discuss with friends, think about, and always find myself reading again, and again: [Invictus](https://www.poetryfoundation.org/poems/51642/invictus).\n\n\nPhoto by [Akshay Nanavati](https://unsplash.com/@anphotos?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}",[810,9,720,720],{"slug":6076,"featured":6,"template":680},"the-cloud-native-all-remote-security-challenge","content:en-us:blog:the-cloud-native-all-remote-security-challenge.yml","The Cloud Native All Remote Security Challenge","en-us/blog/the-cloud-native-all-remote-security-challenge.yml","en-us/blog/the-cloud-native-all-remote-security-challenge",{"_path":6082,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6083,"content":6088,"config":6093,"_id":6095,"_type":14,"title":6096,"_source":16,"_file":6097,"_stem":6098,"_extension":19},"/en-us/blog/the-continued-support-of-fluxcd-at-gitlab",{"title":6084,"description":6085,"ogTitle":6084,"ogDescription":6085,"noIndex":6,"ogImage":5460,"ogUrl":6086,"ogSiteName":667,"ogType":668,"canonicalUrls":6086,"schema":6087},"The continued support of FluxCD at GitLab","GitLab is committed to working with other partners to make sure that Flux remains a stable, reliable, and mature Cloud Native Computing Foundation project.\n","https://about.gitlab.com/blog/the-continued-support-of-fluxcd-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The continued support of FluxCD at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2024-03-05\",\n      }",{"title":6084,"description":6085,"authors":6089,"heroImage":5460,"date":6090,"body":6091,"category":675,"tags":6092},[2531],"2024-03-05","Last month, Weaveworks CEO Alexis Richardson [announced 
publicly](https://www.linkedin.com/posts/richardsonalexis_hi-everyone-i-am-very-sad-to-announce-activity-7160295096825860096-ZS67) the company, which is the main sponsor of FluxCD, is closing its doors and shutting down its commercial operations.\n\nGitLab made a strategic decision in early 2023 [to integrate FluxCD with its agent for Kubernetes offering](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/) as the recommended GitOps solution. While we were sad to see the news about Weaveworks, the company, it in no way changes our commitment to FluxCD, the project, and its ability to drive efficiencies for our customers. FluxCD is a mature, enterprise-ready GitOps solution with a modern, modular architecture and clean codebase that lends itself for integration and requires minimal maintenance.\n\nIn the past month, we have had discussions with a number of companies that built their tooling around FluxCD, and together we are certain that FluxCD is a solution we want to continue to support and rely upon. We looked into switching to alternatives, but decided against other options. We are confident in the future of Flux. Flux is a mature Cloud Native Computing Foundation (CNCF) project with a large and dedicated user base. We believe that our continued support and integration with Flux serves our users the best.\n\nUnfortunately, such an organizational change affects the status of the Flux maintainers. At GitLab, we are committed to open source. When we decided to integrate with Flux, we knew that, sooner or later, we would like to have FluxCD maintainers within GitLab. Given the recent changes, we are committed even more to playing an active role in the Flux community and we want to support FluxCD for enterprise customers.\n\n> “GitLab is a proven platform for software delivery, and I am really pleased to see their leadership standing up to help and support Flux. 
As the inventors of GitOps and FluxCD, I know that Weaveworks people and all our customers will want to see this. For my part, I’m more confident in the future of Flux than ever, and I’m happy to see GitLab being one of the companies working on enterprise Flux support.” - Alexis Richardson, CEO, Weaveworks\n\nAs these are turbulent times in the Flux community, we are working closely with other partners to make sure that Flux remains a stable, reliable, and mature CNCF project.\n\n> Read more about [our FluxCD integration](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/).\n",[2749,675,745,9],{"slug":6094,"featured":91,"template":680},"the-continued-support-of-fluxcd-at-gitlab","content:en-us:blog:the-continued-support-of-fluxcd-at-gitlab.yml","The Continued Support Of Fluxcd At Gitlab","en-us/blog/the-continued-support-of-fluxcd-at-gitlab.yml","en-us/blog/the-continued-support-of-fluxcd-at-gitlab",{"_path":6100,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6101,"content":6107,"config":6112,"_id":6114,"_type":14,"title":6115,"_source":16,"_file":6116,"_stem":6117,"_extension":19},"/en-us/blog/the-difference-transparency-makes-in-security",{"title":6102,"description":6103,"ogTitle":6102,"ogDescription":6103,"noIndex":6,"ogImage":6104,"ogUrl":6105,"ogSiteName":667,"ogType":668,"canonicalUrls":6105,"schema":6106},"The difference transparency makes in security","What happens when you lift the veil around security?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670826/Blog/Hero%20Images/orlova-maria-EF6z_6R94zQ-unsplash.jpg","https://about.gitlab.com/blog/the-difference-transparency-makes-in-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The difference transparency makes in security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2019-09-05\",\n      
}",{"title":6102,"description":6103,"authors":6108,"heroImage":6104,"date":6109,"body":6110,"category":698,"tags":6111},[1010],"2019-09-05","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\n***We sat down with manager of strategic security, Robert Mitchell to talk about the impact of human error, the exponential benefits of transparency in security and more.***\n\n---\n\n![Robert Mitchell Headshot](https://about.gitlab.com/images/blogimages/rmitchell.png){: .small.right.wrap-text} **Name:** Robert Mitchell\n\n**Title:** Manager, [Strategic Security](/handbook/security/#strategic-security)\n\n**How long have you been at GitLab?** I started in November 2018\n\n**GitLab handle:** [@gitlab-rmitchell](https://gitlab.com/gitlab-rmitchell)\n{: #tanuki-orange}\n\n**Connect with Robert:** [LinkedIn](https://au.linkedin.com/in/robert-mitchell-877472/)\n\n\n\n#### Tell us what you do here at GitLab:\nStrategic Security focuses on pro-active measures at scale that improve the security of GitLab for the company, the product or our customers. I develop and lead projects that improve or expand the security department’s capability to deliver a secure and reliable service. I also manage the security automation, threat intelligence and field security teams. \n\n#### What’s the most challenging or rewarding aspect of your role?\nGitLab moves so fast, every day is an adventure. I am constantly humbled and amazed at the level of talent within the company, and the energy that people bring to the table each day with the things they want to do. It’s immensely rewarding to me to be able to respond to our constant iterations, adding my own perspectives and experiences, and to be a part of the growth here. 
My biggest challenges are just keeping up with it all, for while GitLab is leading the world in managing remote work, timezones are difficult in any global organization, and working from Sydney, Australia means that the number of shared working hours I have with teams in the Americas and Europe is limited. \n\n#### And, what are the top 2-3 initiatives you’re currently focused on?\nI’ve been heavily involved in driving our [Zero-Trust Networking](https://about.gitlab.com/handbook/security/#zero-trust) initiative since starting at GitLab. The biggest area I’ve managed personally has been around our identity management and SaaS management processes. Identity and authentication are critical to us as an all-remote company - all our endpoint assets are remote and all our data is hosted in the cloud, so traditional infrastructure security controls don’t really apply to our security model. Therefore, ensuring that we have a strong and consistent method to identify users and ensure that we have visibility of where our data is critical to our business. Our [Zero Trust blog post series](/blog/evolution-of-zero-trust/) makes great reading on our progress. \n\n#### How did you get into security? \nI was on the periphery of the BBS scene in Australia in the late 80s/early 90s in Australia. While not involved in any of the shenanigans detailed in [Suelette Dreyfus’s excellent book about that era](http://www.underground-book.net/), the exploits of some of these characters were known to me at the time. I was always curious about what could and couldn’t be done on the Internet, but my formal involvement in IT Security really kicked off when I landed a job at Check Point Software in the late 90s. 
A lot has changed since the days when Firewalls, VPNs and stateful inspection were the key technologies, but many of the foundational principles from those days are still just as relevant today.\n\n#### What is the most significant piece of security advice you could provide to a colleague or friend? \nHuman error is the most significant cause of security problems. So many of the security breaches that have come to pass in recent years inevitably have an element where a person with good intentions has made a decision with dire consequences. So when thinking about Security, don’t just think about the cool hack or the clever technology. Most likely, the vulnerability will be a person who will make the mistake that causes a breach, so everything you can do to educate, inform and remove the potential for the human side of a system to fail will make the greatest difference. \n\nA simple example of this is passwords. A site like [https://haveibeenpwned.com/](https://haveibeenpwned.com/) is a sobering read for how often people don’t set passwords that are effective, and a common human error is using the same password in multiple places, for convenience. Progressively more complicated password policies are not really a good solution here (because users can just come up with a more complex password they re-use everywhere!), but implementing a second authentication factor that is dynamic (e.g. Google Authenticator) is a simple control that is relatively user-friendly, and makes a massive difference to the risk of a breach. \n\n#### From the perspective of your role, what’s GitLab doing better than anyone else in terms of security?\n>Transparency. Security has a tradition of encouraging secrecy and a culture of “need to know” which has discouraged collaboration and sharing of information for a long time. 
We are now seeing that allowing researchers and practitioners to share data about their knowledge and information has an exponential benefit, and that by being honest and transparent about the risks and problems that we have, we expose the problems more efficiently and ultimately get a better solution. While there is still a need to be responsible with disclosure and ensure that shared information does not expose people to unnecessary risks, GitLab is leading in showing that raising the veil around what is involved in securing a product and service actually results in a better quality product, and enhances trust rather than dilutes it. \n\n#### What do you look forward to the most in security in the next 5 years?\nThere is a definite generational change in the air, with the evolution of Security in DevOps and more people with a coding/automation background getting into the Security space. What interests me particularly, is seeing how those fresh eyes can look at existing challenges around enforcing security controls, and how to use new models to attack age-old problems like large-scale log analysis and intrusion detection and response. In our own team we’re starting some great experiments using machine learning to analyse traffic logs for indicators of abuse, with some great initial successes and an ultimate goal of automating both detection and response of abusive behaviours. From a GitLab perspective, that’s doubly exciting because the learnings we get from this are things that we can feed back into our platform, thus allowing all of our customers to benefit! \n\n#### Is there an area of security research you think deserves more attention? Why?\nI have a strong belief that the human side of security is often neglected by technical teams, and by research. There has been some great research into social engineering within the last 5-10 years, but a lot of it is focused on the offensive side of social engineering, and nowhere near enough on the blue/defense side. 
Understanding why people make mistakes and course-correcting is an area that I believe is seriously under researched, and in terms of real benefit would make a massive difference to our industry. One of the few papers in this space is [\"The psychology of scams\"](https://webarchive.nationalarchives.gov.uk/20140402205717/http://oft.gov.uk/shared_oft/reports/consumer_protection/oft1070.pdf) (warning, it’s a long read!) but if you know of good work in this area, I’d love to read it.\n\n## Now, for the questions you *really* want to have answered:\n\n\n#### What was the first computer you owned?\nAn Exidy Sorcerer! My father bought it when I was 7 years old. Killer Specs - 32KB (yes, KB!) RAM, Z-80 Processor, 2 (count them!) colours, no sound unless you did the parallel port mod (which we did, of course!). I taught myself BASIC and Assembler programming by copying programs by hand in books and finding all the typos. I still have a soft spot for vintage personal computers, we are spoiled by the amount of power we have available to us these days. \n\n#### Gif or Gif? (Gif vs Jif)\nGif. Obviously… \n\n#### What’s your favorite season?\nWinter. I love the cold, although Australian winters are pretty mild in comparison to other parts of the world. If I had to dig myself out of several feet of snow every day, I might change my mind!\n\n#### What is that one food, you cannot live without?\nI’m a pretty massive foodie, and particularly love South East Asian food (Malay, Thai, Indonesian). Making me choose one food is too hard, but a world without Beef Randang, Nonya dishes and Thai Curries is too sad to contemplate….\n\n#### When you’re not working, what do you enjoy doing/how do you spend your free time?\nI like to get out on my motorbike and go touring when time permits. The freedom of an open country road or a hill/mountain with a great twisty road is one of life’s great pleasures. It’s also a great way to meet interesting people and share their stories. 
\n\n\nPhoto by [Orlova Maria](https://unsplash.com/@orlovamaria?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}",[810,9,720,720],{"slug":6113,"featured":6,"template":680},"the-difference-transparency-makes-in-security","content:en-us:blog:the-difference-transparency-makes-in-security.yml","The Difference Transparency Makes In Security","en-us/blog/the-difference-transparency-makes-in-security.yml","en-us/blog/the-difference-transparency-makes-in-security",{"_path":6119,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6120,"content":6126,"config":6132,"_id":6134,"_type":14,"title":6135,"_source":16,"_file":6136,"_stem":6137,"_extension":19},"/en-us/blog/the-future-of-the-gitlab-web-ide",{"title":6121,"description":6122,"ogTitle":6121,"ogDescription":6122,"noIndex":6,"ogImage":6123,"ogUrl":6124,"ogSiteName":667,"ogType":668,"canonicalUrls":6124,"schema":6125},"The Future of the GitLab Web IDE","There are big changes in store for the Web IDE in the coming milestones.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679284/Blog/Hero%20Images/johannes-plenio-2TQwrtZnl08-unsplash.jpg","https://about.gitlab.com/blog/the-future-of-the-gitlab-web-ide","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The Future of the GitLab Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Schurter\"}],\n        \"datePublished\": \"2022-05-23\",\n      }",{"title":6121,"description":6122,"authors":6127,"heroImage":6123,"date":6129,"body":6130,"category":675,"tags":6131},[6128],"Eric Schurter","2022-05-23","\nWay back in April 2018, [GitLab 10.7 introduced the Web IDE](/blog/introducing-gitlab-s-integrated-development-environment/) to the world and brought a delightful multi-file editor to the heart of the GitLab experience. 
Our goal was to make it easier for anyone and everyone to contribute, regardless of their development experience. Since its introduction, tens of millions of commits have been made from the Web IDE, and we've added features like [Live Preview](https://docs.gitlab.com/ee/user/project/web_ide/#live-preview) and [Interactive Web Terminals](https://docs.gitlab.com/ee/user/project/web_ide/index.html#interactive-web-terminals-for-the-web-ide) to enhance the experience. Now, we're excited to share some big changes we have in store for the Web IDE in coming milestones.\n\n## What makes an IDE?\n\nOver the years, we've learned a lot about how you all are using the Web IDE. We've [compared it to our Web Editor](https://about.gitlab.com/blog/a-tale-of-two-editors/) in the repository view. We've spoken to developers, designers, product managers, and technical writers alike. Almost universally, we hear that the Web IDE is great for small changes: a quick change to a config file, an update to a Markdown file, or a typo fix in a merge request. These lightweight changes make up the vast majority of the Web IDE usage. And for those use cases, it's super convenient and intuitive.\n\n![Editing a file in the current Web IDE](https://about.gitlab.com/images/blogimages/web-ide-diff-view.png)\n\nBut to grow, and to really earn the moniker “IDE,” what are we missing? What keeps developers from making more complex changes in the Web IDE? What can we do to elevate the experience? We hear about missing features and functionality like a [collapsible file panel](https://gitlab.com/groups/gitlab-org/-/epics/2585) that supports [contextual actions](https://gitlab.com/gitlab-org/gitlab/-/issues/197775) and drag and drop or [tighter integration with merge requests](https://gitlab.com/groups/gitlab-org/-/epics/2687). 
We've learned that there's no single feature that's a deal-breaker for most developers; it's the sum total of a lot of little user experience gaps.\n\nThe Web IDE is built on top of the fantastic open source project, [Monaco](https://microsoft.github.io/monaco-editor/). What made Monaco a great choice as the foundation of the Web IDE is also what makes it more difficult to address all these concerns holistically. Monaco is just that: a foundation. We have to implement all these workflows and features ourselves. Meanwhile, another open source project has been laser-focused on delivering a lovable IDE on top of Monaco.\n\n## Enter VS Code\n\nYou may have heard of [Visual Studio Code](https://code.visualstudio.com/), or VS Code. With its [dominant market share](https://insights.stackoverflow.com/survey/2021#section-most-popular-technologies-integrated-development-environment), chances are pretty good that you are even using it as your primary code editor. As it happens, [VS Code](https://github.com/microsoft/vscode) core is also open sourced under the MIT license. While the core project isn't a perfect drop-in replacement for the Web IDE, our Staff Frontend Engineer, [Paul Slaughter](/company/team/#pslaughter), wanted to see if we could run it inside GitLab.\n\nTurns out, we can:\n\u003Chttps://www.youtube.com/embed/_9G45TNR7VA>\n\nIn this video Paul Slaughter, Staff FE Engineer, walks Eric Schurter, Senior Product Manager, through the VS Code Web IDE proof of concept. 
See parts [two](https://www.youtube.com/watch?v=oyEFNOC1_Bo&list=PL05JrBw4t0KrRQhnSYRNh1s1mEUypx67-&index=9), [three](https://www.youtube.com/watch?v=1mTkNxrFXec&list=PL05JrBw4t0KrRQhnSYRNh1s1mEUypx67-&index=8), and [four](https://www.youtube.com/watch?v=qEiXtiInFIA&list=PL05JrBw4t0KrRQhnSYRNh1s1mEUypx67-&index=7) for closer looks at extensions, performance, and customization.\n\nAs you can see in the videos above, Paul was able to build a proof of concept that brings the VS Code editing experience into the GitLab UI, running entirely in the browser. No additional infrastructure needed.\n\nNext, we asked ourselves the question: Do we want to continue to invest in implementing custom features for the Web IDE that ultimately deliver the same value as those already available in VS Code? Or do we embrace VS Code inside GitLab, and invest in extending the experience to more tightly integrate with GitLab and the DevOps workflow?\n\n## Meet the new Web IDE\n\nAs you've probably already guessed, we've decided to [replace the current Web IDE with one built on top of VS Code](https://gitlab.com/groups/gitlab-org/-/epics/7683). In the coming milestones, we will build out custom support for the features not already available in the VS Code core, and validate that the workflows you already depend on in the Web IDE are handled in the new experience. We're working with the team that builds our amazing [GitLab Workflow extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) for VS Code to make it available in the browser so we can bundle it in the Web IDE, and bring all those great features along for the ride. That includes [bringing merge request comments into the Web IDE](/blog/mr-reviews-with-vs-code/) for the first time ever!\n\n## Speaking of extensions\n\nYou read that right: extensions. 
One of the most compelling aspects of VS Code is the massive community and library of extensions available to customize your experience and integrate with other tools. A subset of [these extensions](https://open-vsx.org/) are already compatible with a web-based instance of VS Code, and our goal is to make them available in the Web IDE so you and your teams can work as efficiently and consistently as possible. Bringing extensions into the GitLab experience is not something we're taking lightly, so we'll be evaluating the best approach for ensuring the security and privacy of your data.\n\n## With great power comes great responsibility\n\nThis transition doesn't come without tradeoffs. We know that many of you appreciate the Web IDE for its simplicity, and it's safe to say that the increase in functionality VS Code brings to the table does come with an increase in complexity. The original Web IDE was introduced as a way to ensure that everyone can contribute. In keeping with that spirit, we will invest in improvements to our [core editing component](https://gitlab.com/groups/gitlab-org/-/epics/4861) that powers the [Web Editor](https://docs.gitlab.com/ee/user/project/repository/web_editor.html), Snippets, Pipeline Editor, and code editing elsewhere in GitLab. This core component will be extended to support multi-file editing. Our hope is that it actually serves those workflows that require simple edits even better than the Web IDE ever did.\n\n## I'm ready, when can I have it?\n\nWe're all excited to start using the new Web IDE as soon as possible. We're actively working on the integration and you can expect to see it sometime in the 15.x release cycle. If you would like to provide early feedback and help us fine tune the experience, please fill out this [short survey](https://forms.gle/S1vU5vkaEjE1NPMv9) to be considered for early access.\n\n## But wait, what about the runtime stuff?\n\nRemember at the beginning of this post when I asked what makes an IDE? 
The critical piece of the puzzle that VS Code is still missing is a runtime environment to compile your code. Without this environment, you can't generate real-time previews, run tests, or take advantage of code completion. We're looking to tackle this problem with the newly-formed [Remote Development category](/direction/create/ide/remote_development/), but that's a topic for a whole other blog post.\n\nUntil then, happy editing!\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\nCover image by [Johannes Plenio](https://unsplash.com/@jplenio?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[677,675,700,9],{"slug":6133,"featured":6,"template":680},"the-future-of-the-gitlab-web-ide","content:en-us:blog:the-future-of-the-gitlab-web-ide.yml","The Future Of The Gitlab Web Ide","en-us/blog/the-future-of-the-gitlab-web-ide.yml","en-us/blog/the-future-of-the-gitlab-web-ide",{"_path":6139,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6140,"content":6146,"config":6152,"_id":6154,"_type":14,"title":6155,"_source":16,"_file":6156,"_stem":6157,"_extension":19},"/en-us/blog/the-gitlab-handbook-by-numbers",{"title":6141,"description":6142,"ogTitle":6141,"ogDescription":6142,"noIndex":6,"ogImage":6143,"ogUrl":6144,"ogSiteName":667,"ogType":668,"canonicalUrls":6144,"schema":6145},"The GitLab handbook by numbers","Two GitLab team-members take a fresh look at GitLab's open source team 
handbook, charting its evolution over the years to the weighty tome it is today.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670434/Blog/Hero%20Images/handbook-cover.jpg","https://about.gitlab.com/blog/the-gitlab-handbook-by-numbers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The GitLab handbook by numbers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lukas Eipert\"},{\"@type\":\"Person\",\"name\":\"Lee Matos\"}],\n        \"datePublished\": \"2019-04-24\",\n      }",{"title":6141,"description":6142,"authors":6147,"heroImage":6143,"date":6149,"body":6150,"category":299,"tags":6151},[6148,4683],"Lukas Eipert","2019-04-24","\nSharing and retrieving information is a crucial part of everyday work life.\nWhere do you get information from, be it about hiring processes, social media guidelines, or reporting expenses?\nAt GitLab, all of that can be found in [the handbook](https://handbook.gitlab.com/) – have a look, it's public!\n[Sid](/company/team/#sytses), our CEO, [wrote about the importance and the open sourcing of our handbook][sid-blog-post] about two and a half years ago.\nBack then we were just shy of 100 employees.\nIn this post we will look at how the handbook has developed over time, how we interact with it,\nand how it still works for over 550 employees.\n\n[sid-blog-post]: /blog/our-handbook-is-open-source-heres-why/\n\n## One book to guide them all\n\nAt the time of writing, the handbook contains about 605,000 words.\nWhile probably a bit less captivating than the tales of Frodo and Middle Earth,\nwe have composed more pages than \"The Lord of the Rings\" and \"The Hobbit\" combined, since the [first commit][first-commit] in 2015.\nIt would take around 50 hours of continuous reading to cover the whole handbook, front to back.\n\n### Is it overwhelming to read through it all?\n\nIt would be, but as the handbook covers a wide range of 
topics, you probably don't need to read every single word.\nAs the handbook changes over time it is not necessary to memorize it all, but it is more important to remember how to retrieve information.\nSo as long as you know where to find something, you are on the safe side.\n\n> It would take around 50 hours of continuous reading to cover the whole handbook, front to back\n\n[first-commit]: https://gitlab.com/gitlab-com/www-gitlab-com/blob/2d2ced8f79da96fe981a3a6f6cf5918fa2dd992a/source/team-handbook/index.html\n\n## One book to be written by them all\n\n![Graph showing the growth of the handbook over time (May 2015 - April 2019)](https://about.gitlab.com/images/blogimages/evolution_handbook/handbook-history.png){: .shadow.center}\n*\u003Csmall>Graph showing the growth of the handbook, broken down by subcategory, over time (May 2015 – April 2019)\u003C/small>*\n\nCurrently all knowledge in the handbook is spread across 550 unique web pages, with the average page containing around 1,100 words.\nThe most words have been written in the subcategory engineering (138,000 words), with marketing a close second (115,000 words).\nTypically, as teams grow, more of their processes get documented in the handbook, which leads to a natural growth of the respective category.\n\n> The most words have been written in the subcategory engineering (138,000 words)\n\n### Who contributes to the handbook?\n\nYou might think that there is someone special who writes all those pages, but it's important\nto remember that [everyone can contribute](https://handbook.gitlab.com/handbook/company/mission/) to the handbook. 
It is actually part of our [onboarding process]\nto improve something about the handbook – whether that's clarifying wording or making it easier to find something.\nNothing is exempt from change; even [our core values are adjusted over the course of time][values-history].\n\n### How do you make changes to the handbook?\n\nIf someone at GitLab or from the wider community wants to change something, they follow a simple workflow that is familiar to every GitLab user:\n\n1. Create a merge request which introduces the change.\n2. Discuss the merge request with the stakeholders.\n3. Iterate on the change and come to an agreement.\n4. Let the merge request be merged.\n\nMore important changes (not every typo of course!) are then announced via Slack or our [company call].\nThe handbook also has its own [changelog] which you can check regularly to see what has been changed over time.\n\n[onboarding process]: https://handbook.gitlab.com/handbook/people-group/general-onboarding/\n[values-history]: https://gitlab.com/gitlab-com/www-gitlab-com/commits/master/source/handbook/values/index.html.md\n[company call]: https://handbook.gitlab.com/handbook/communication/\n[changelog]: https://handbook.gitlab.com/handbook/about/changelog/\n\n## One book to be read by them all\n\nIn 2018 we had several hundred thousand page views on pages in the handbook. 
It is hard to tell which views come from GitLab team-members and which from the wider community.\nAmong the most-read pages are our [Markdown Guide], the pages about [global compensation], our [values], the [hiring process], our [product], [benefits], and how to [communicate].\nThese pages are topics of general interest to people within and outside the company.\nWhat could be a better resource to potential candidates than those pages that show the inner workings of GitLab?\n\n### How do you find anything in the handbook?\n\nThe handbook has a search function; you can use the [index page](https://handbook.gitlab.com/) as an entry point, or just use your favorite search engine to find information.\nWhenever someone asks a question in our Slack, there is a high probability that someone will answer with a link to the handbook.\nIf someone asks a question that has no answer in the handbook, we highly encourage people to add that information to document it and make it easier for future GitLab team-members to find answers.\n\n> Whenever someone asks a question in our Slack, there is a high probability that someone will answer with a link to the handbook\n\n[Markdown Guide]: https://handbook.gitlab.com/handbook/markdown-guide/\n[global compensation]: https://handbook.gitlab.com/handbook/total-rewards/compensation/\n[product]: https://handbook.gitlab.com/handbook/product/\n[communicate]: https://handbook.gitlab.com/handbook/communication/\n[values]: https://handbook.gitlab.com/handbook/values/\n[benefits]: https://handbook.gitlab.com/handbook/total-rewards/benefits/\n[hiring process]: https://handbook.gitlab.com/handbook/hiring/\n\n## One book to be the future\n\nWe hope that this glimpse into the handbook is as interesting for you as it was for us.\nIn an all-remote company it is especially important to write everything down, so that no matter\nwhere you are in the world or what time zone you choose to work in, the information you need is accessible.\nAt the moment we are 
happy to say that we think that the handbook works as well for us now as it did with 100 employees.\nIt aligns with our [values] more than ever.\n\nFor us it is the most transparent way to collaborate on documentation of company internals.\nWe are able to efficiently iterate on topics, resulting in more in-depth coverage over time.\nPersonally the authors cannot see many reasons why the handbook should not be able to scale even further.\nEventually it will evolve further, from the three tomes we have today, to a digital encyclopedia.\nWe are definitely excited to see what the future holds!\n\nHave you taken inspiration from our handbook? Let us know by tweeting [@gitlab](https://twitter.com/gitlab).\n\nPhoto by [Beatriz Pérez Moya](https://unsplash.com/photos/XN4T2PVUUgk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/books?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[811,677,9,745],{"slug":6153,"featured":6,"template":680},"the-gitlab-handbook-by-numbers","content:en-us:blog:the-gitlab-handbook-by-numbers.yml","The Gitlab Handbook By Numbers","en-us/blog/the-gitlab-handbook-by-numbers.yml","en-us/blog/the-gitlab-handbook-by-numbers",{"_path":6159,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6160,"content":6165,"config":6170,"_id":6172,"_type":14,"title":6173,"_source":16,"_file":6174,"_stem":6175,"_extension":19},"/en-us/blog/the-many-routes-to-a-tech-career",{"title":6161,"description":6162,"ogTitle":6161,"ogDescription":6162,"noIndex":6,"ogImage":4544,"ogUrl":6163,"ogSiteName":667,"ogType":668,"canonicalUrls":6163,"schema":6164},"The many routes to a tech career","GitLab team members of different ages and backgrounds share their entry into this industry.","https://about.gitlab.com/blog/the-many-routes-to-a-tech-career","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"The many routes to a tech career\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2022-10-04\",\n      }",{"title":6161,"description":6162,"authors":6166,"heroImage":4544,"date":6167,"body":6168,"category":808,"tags":6169},[1010],"2022-10-04","\nThe path to a career in technology isn’t always straight, particularly today. World and economic uncertainty, a lingering pandemic, a shift to remote work, and a need to do something that *matters* – all of these factors have caused sweeping changes in the broader workforce, in individual careers, and in the labor-shortage-plagued technology industry.\n\n## Tech career: Overview and insights\n\nEver wondered how to get into the tech world? To help try to make sense of it all, we asked three GitLab team members how they made their way into technology, and why they stay. Each has a different story to tell.\n\n### [Mark Loveless](https://gitlab.com/mloveless), Staff Security Engineer\n\nFollow Mark on [Twitter](https://twitter.com/simplenomad)\n\nI’ve been working since the age of 16 at various jobs, eventually gaining my first real tech job in 1990 as customer support at a call center. I had always had an interest in security and moved into more of a true security role in the mid-1990s, followed by my first security research job in 1999. For many in the security field, security research was fairly brand-new territory, so those of us who had been working for quite a while found ourselves reporting to individuals our own age or younger. Later on in my career this more or less became the norm, as my peers were almost always younger than me.\n\nI did, on occasion, run into prejudices involving my age, with the main two being as follows: \n- I was often overlooked for exploring new technologies as it was assumed I would not “get it.”\n\n- It was assumed that there was something wrong with me for not being in management. 
I love learning new things and am constantly exploring new technology. I’ve never had the desire to go into management as I preferred the independent contributor (IC) role. \n\nTo stay active and “keep up on the latest” whether it be the newest apps or what some weird meme means, well, Google is your friend. I try to stay active on at least some social media sites. I have friends and family who are much younger than me that I interact with a lot, and I ask a lot of questions. All of these steps have helped me substantially.\n\nIt is nice that when some new bit of tech comes out, I now have family and friends asking me what it's all about, and they certainly start asking if it is considered “safe” technology because they know my background. I’m fortunate that here at GitLab what knowledge I have is appreciated, no one assumes I can or cannot do something because of my age or because of preconceived ideas about what I might know at this point in my career.\n\n### [Juliet Wanjohi](https://gitlab.com/jwanjohi), Senior Security Engineer\n\nFollow Juliet on [Twitter](https://twitter.com/jay_wanjohi)\n\nI started in tech by undertaking a bachelor’s degree in Computer Science. I had an interest in software engineering before I decided to specialize in another area of interest: security. My goal was to blend my knowledge and skills in the two fields, and create a niche for myself as a security software engineer. I got the wonderful opportunity to be a part of the GitLab [Engineering Internship program](/handbook/engineering/internships/) and progressed on to become a full-time security engineer on the [Security Automation](/handbook/security/security-engineering/automation/) team in 2020. \n\nIt was both exciting and overwhelming to join such a distinguished, mature team while still being very green in the security field. I was among the youngest members of the team, which definitely drew out my imposter syndrome. 
Despite this, GitLab offered a welcoming environment where I felt comfortable and encouraged to bring my ideas forward, and contribute as any other team member would. About a year later, I was promoted to senior security engineer, highlighting the fact that number of years of experience does not necessarily translate to seniority; you also don’t have to be of a certain age to work at a certain level of a role. It all comes down to your skills, and a willingness to further your passion and be better at what you do. \n\nIn previous junior roles I had experienced negative effects of stereotypical thinking and unconscious bias, where my contributions were not valued because of my age. I was often overlooked when it came to opportunities to lead presentations or own projects. This made me feel like I had to work harder and put more pressure to prove myself “worthy.” Such occurrences should not discourage anyone who’s young and new to tech, but instead push you to confidently contribute your ideas, and look for ways to expand your reach by making the most of the networking and learning opportunities available to you. \n\nIt’s important to research and evaluate the culture of a company before joining it. Take a look at the initiatives the company carries out to increase awareness against these biases and the efforts to support those who are new to the field (whether they be due to age or career path). I feel lucky to be a part of GitLab, as there are [dedicated resources for team member career, growth, and development](/handbook/people-group/learning-and-development/career-development/#resources-for-team-members), including a newly created [Early Career Professionals Team Member Discussion Group](/company/culture/inclusion/tmdg-gitlab-early-career/). 
The group helps those that are early career professionals in the team by supporting their growth and increasing awareness in the organization around the challenges they face on a day-to-day basis.\n\n### [Pj Metz](https://gitlab.com/PjMetz), Education Evangelist\n\nFollow Pj on [Twitter](https://twitter.com/metzinaround)\n\nI made a transition into tech at 35 years old. I didn’t feel 35 when I started though because I had only just started learning about tech through coding a year before I started at GitLab. Instead, I felt 19 – brand-new and lost in a world in which I had no experience. \n\nAs a teacher, I was confident in my abilities in the classroom. I was, not to brag, a great English teacher. I was engaging, excited about the material, and worked hard to make it relatable and enjoyable for as many students as possible. Leaving after 11 years was not an easy choice, especially because my degrees felt suddenly useless. What other work could I possibly do with a Master’s degree in Secondary English Education?\n\nI joined GitLab as an Education Evangelist in our [Education Program](/handbook/marketing/developer-relations/community-programs/education-program/) and was able to draw on my former knowledge base, but not completely.\n\nAlthough I don’t have to code for my role, I have to know coding, which I had only started to learn in 2020 in between grading papers and working with a marching band at my high school. I also have to know how to talk to students and educators in a variety of concentrations. Computer Science, Information Systems, Business Analysis, and other degree programs are all looking to use [GitLab for Education](/solutions/education/), and I have to find ways to make it relevant for them.\n\nThis challenge has led to some of the hardest moments of my professional life. 
I can navigate an unmotivated teenager in class, a parent email about their child’s low grades that blames me, an administrator suddenly showing up for an observation, a drumline member who hasn’t figured out the rhythm for the halftime show opener, or an AP student stuck on analysis of the assigned article. However, this is different. The career I entered into is full of jargon and standards that were unfamiliar to me.\n\nI had a lot to learn. What are stock options? What is Slack? How do I structure my time if there isn’t a bell ringing to let me know the beginning and end of class? What is an expense report? People expect someone my age to know these things already.\n\nI have a sticker on my laptop case that looks like the kind you’d get at a small meetup, the kind that says “HELLO, I’m...” and then there is a space to write your name. This sticker says: “Hello, I’m Still Learning.” I have this not so people can lower their expectations of me; instead, its purpose is to highlight that we should all still be learning and I’m going to be open about what I don’t know. I’m doing my best to turn my perceived shortcomings into strengths by bringing a mindset of [iteration](https://handbook.gitlab.com/handbook/values/#iteration) to my work, something GitLab helped me realize was important. \n\nI’m still learning, and feel so far behind some of my colleagues, but GitLab and my team have worked hard to create a space for me to feel comfortable while I work through this career change. It helps that my manager is also a former educator, so she understands the change from education to the corporate world.\n\nShe reminds me to take time for myself after each conference or lecture. My onboarding buddy still meets with me regularly to help me work through something technical or to give advice about a project I’m working on. 
Every opportunity to connect with people as a person, whether through a [coffee chat or the “Donut-be-strangers” Slack bot](/company/culture/all-remote/informal-communication/#coffee-chats), which matches me with another, random team member, helps me remain grounded in the humanity of my work. Every team meeting I’m in has a reminder of the importance of taking time for ourselves, and a section in the agenda to cheer each other’s accomplishments. I couldn’t ask for a better place to have my first non-teaching job. \n\n### What’s your story? \n\nHow’d you get into tech? Make any pit stops along the way, or have you always been working in this industry? Let us know in the comments field. Also, if you are considering GitLab as your next step, check out our handbook to learn more about [our culture](/company/culture/), and then take a peek at our [open roles](/jobs/all-jobs/)!\n",[810,9],{"slug":6171,"featured":6,"template":680},"the-many-routes-to-a-tech-career","content:en-us:blog:the-many-routes-to-a-tech-career.yml","The Many Routes To A Tech Career","en-us/blog/the-many-routes-to-a-tech-career.yml","en-us/blog/the-many-routes-to-a-tech-career",{"_path":6177,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6178,"content":6184,"config":6189,"_id":6191,"_type":14,"title":6192,"_source":16,"_file":6193,"_stem":6194,"_extension":19},"/en-us/blog/the-on-call-handover-at-gitlab",{"title":6179,"description":6180,"ogTitle":6179,"ogDescription":6180,"noIndex":6,"ogImage":6181,"ogUrl":6182,"ogSiteName":667,"ogType":668,"canonicalUrls":6182,"schema":6183},"How our production team runs the weekly on-call handover","Senior Production Engineer John Jarvis explains our handover process for on-call incidents in a fully remote and distributed team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678661/Blog/Hero%20Images/production-on-call-handover.jpg","https://about.gitlab.com/blog/the-on-call-handover-at-gitlab","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How our production team runs the weekly on-call handover\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jarvis\"}],\n        \"datePublished\": \"2018-03-14\",\n      }",{"title":6179,"description":6180,"authors":6185,"heroImage":6181,"date":6186,"body":6187,"category":743,"tags":6188},[3325],"2018-03-14","\nHow do you manage on-call incidents among a team of eight distributed across three time zones?\nEvery week, production engineers are assigned to the role of handling on-call.\nWith this, comes the [expectation][on-call-expectations] of being available to\nrespond to any issue that results in a critical alert. Additionally,\non-call individuals act as an umbrella for\nother members of the team by triaging and handling all issues\nrelated to GitLab.com infrastructure.\n\n\u003C!-- more -->\n\nThe production team structures on-call shifts so that they follow the sun, to\navoid waking up members of the team in the middle of the night.\nThis works well for GitLab's [remote-only culture](/company/culture/all-remote/) where there are engineers in multiple\ntime zones. Occasionally, an on-call engineer will need to respond to an issue\noutside normal working hours; in these situations, GitLab encourages members to take\n[time off][on-call-time-off] after your shift to recover.\n\n## The on-call handover\n\nAs the team members working on-call shifts are distributed and their working hours don't always overlap, you can see how it would be easy for things to slip through the cracks between one shift and the next. 
To prevent this happening, once a week, the production team holds a 30-minute meeting called the [on-call handover][on-call-handover].\nOne of the key tenets of GitLab is that [everything starts with an issue][start-with-an-issue], and\nthe on-call handover is no exception!\nFrom a generated report, the team reviews incidents that occurred during the\nlast seven days and decide whether they need additional attention or escalation.\n\nAfter that, we check all GitLab issues with the on-call label to see if there are\nany that need to move from the current shift to the next one. At the end, there\nis a brief review of seven-day graphs. These help us keep an eye out for anything\nanomalous in our key metrics. If there is anything that seems\nout of the ordinary or warrants further investigation, the team will dig into them to see if we can\nidentify the root cause. The production team at GitLab encourages leads of other\ngroups to attend the review, as this helps bring to our attention any particular high-priority\nitems specific to individual services.\n\n## Automating the on-call handover\n\nDrinking our own wine by using GitLab for on-call report generation has proven to\nbe a good way to automate some of the more tedious work of the handover.\nTo aid with this, the production team developed a program\ncalled the [on-call robot assistant][on-call-robot-assistant]. 
It pulls data\nfrom relevant sources such as PagerDuty, Grafana and GitLab itself to generate a\nreport with a GitLab issue.\n\nThe program automates the following tasks:\n\n* Pulling the last shift's incidents from PagerDuty\n* Generating issue stats from the [production backlog][production-backlog]\n* Display seven-day graphs for the key performance metrics that we are monitoring\n  that are sourced from [GitLab Prometheus][gitlab-prometheus] monitoring\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/oncall-robot-tty.gif\" alt=\"oncall-tty\" class= \"shadow\" style=\"width: 600px;\"/>\u003C/center>\n\n*\u003Csmall>Generating an on-call report in a GitLab issue\u003C/small>*\n\nThese data sources are set in a [simple configuration file][ocr-config], making it\neasy to iterate as we add new metrics to monitor.\nAt GitLab, most of what we do is out in the open so our on-call handover reports are\navailable for anyone to check out. If you want to see previous reports from\nthe on-call handovers [check them out in our issue tracker][on-call-reports].\n\nFor example, here is one recent report that shows a report for a previous week:\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/oncall-robot-report1.png\" alt=\"oncall-report1\" class= \"shadow\" style=\"width: 600px;\"/>\u003C/center>\n\nAs well as some graphs for key metrics the production team is monitoring:\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/oncall-robot-report2.png\" alt=\"oncall-report2\" class= \"shadow\" style=\"width: 600px;\"/>\u003C/center>\n\nWhen the team is finished reviewing the report, the current on-call engineer closes it\nand the shift officially ends.\n\n[Photo](https://unsplash.com/photos/ocs8x33bpMA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Denny Müller on [Unsplash](https://unsplash.com/search/photos/telephone?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n\u003C!-- identifiers -->\n\n[on-call-expectations]: 
/handbook/on-call/#expectations-for-on-call\n[on-call-time-off]: /handbook/paid-time-off/#a-gitlabbers-guide-to-time-off\n[start-with-an-issue]: /handbook/communication/#everything-starts-with-an-issue\n[on-call-robot-assistant]: https://gitlab.com/gl-infra/oncall-robot-assistant\n[production-backlog]: https://gitlab.com/gitlab-com/infrastructure/issues\n[gitLab-prometheus]: https://docs.gitlab.com/ee/administration/monitoring/prometheus/\n[ocr-config]: https://gitlab.com/gl-infra/oncall-robot-assistant/blob/master/oncall-settings-example.yaml\n[on-call-reports]: https://gitlab.com/gitlab-com/infrastructure/issues?scope=all&utf8=%E2%9C%93&state=closed&label_name[]=oncall%20report\n[on-call-report-example]: https://gitlab.com/gitlab-com/infrastructure/issues/3583\n[on-call-handover]: /handbook/engineering/infrastructure/team/reliability/on-call-handover/\n",[9,2396],{"slug":6190,"featured":6,"template":680},"the-on-call-handover-at-gitlab","content:en-us:blog:the-on-call-handover-at-gitlab.yml","The On Call Handover At Gitlab","en-us/blog/the-on-call-handover-at-gitlab.yml","en-us/blog/the-on-call-handover-at-gitlab",{"_path":6196,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6197,"content":6203,"config":6208,"_id":6210,"_type":14,"title":6211,"_source":16,"_file":6212,"_stem":6213,"_extension":19},"/en-us/blog/the-road-to-gitaly-1-0",{"title":6198,"description":6199,"ogTitle":6198,"ogDescription":6199,"noIndex":6,"ogImage":6200,"ogUrl":6201,"ogSiteName":667,"ogType":668,"canonicalUrls":6201,"schema":6202},"The road to Gitaly v1.0 (aka, why GitLab doesn't require NFS for storing Git data anymore)","How we went from vertical to horizontal scaling without depending on NFS by creating our own Git RPC service.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670092/Blog/Hero%20Images/road-to-gitaly.jpg","https://about.gitlab.com/blog/the-road-to-gitaly-1-0","\n                        {\n        \"@context\": \"https://schema.org\",\n        
\"@type\": \"Article\",\n        \"headline\": \"The road to Gitaly v1.0 (aka, why GitLab doesn't require NFS for storing Git data anymore)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Zeger-Jan van de Weg\"}],\n        \"datePublished\": \"2018-09-12\",\n      }",{"title":6198,"description":6199,"authors":6204,"heroImage":6200,"date":6205,"body":6206,"category":743,"tags":6207},[3364],"2018-09-12","\nIn the early days of [GitLab.com](https://gitlab.com), most of the application,\nincluding Rails worker processes, Sidekiq background processes, and Git storage,\nall ran on a single server. A single server is easy to deploy to and maintain.\nThe same structure is what most smaller GitLab instances still use for their\nself-managed [Omnibus](https://docs.gitlab.com/omnibus/) installation. Scaling\nis done vertically, meaning; adding more RAM, CPU, and disk space.\n\n## Moving from vertical to horizontal scaling\n\nSoon we ran out of options to continue scaling the system vertically, and we had\nto move to scaling horizontally by adding new servers. To have the repositories\navailable on all the nodes, NFS (Network File System) was used to mount these to each application\nserver and background workers. NFS is a well-known technology for sharing file\nsystems across a network. For each server, each storage node needed to be\nmounted. The advantage: GitLab.com could keep adding more servers and scale. However NFS\nhad multiple disadvantages too: the visibility is decreased to what type of file\nsystem operation is performed. Even worse, one NFS storage node's outage impacted\nthe whole site, and took the whole site down. On the other hand, Git operations\ncan be quite CPU/IOPS intensive too, so we began a balancing act between adding more nodes,\nand thus reducing reliability, versus scaling nodes vertically.\n\n## Considering NFS alternatives\n\nOver two years ago, we started to look for alternatives. 
One of the first ideas\nwas to remove the dependency on NFS with [Ceph](https://ceph.com/).\nCeph is a distributed file system that was meant to replace NFS in an\narchitecture like ours. Like NFS, this would solve our scaling problem on the\nsystem level, meaning that little to no changes would be required to GitLab as\nan application. However, running a Ceph cluster in the cloud didn't have the\nperformance characteristics that were required. Briefly we flirted with the idea\nof [moving away from the cloud][no-cloud], but this would have had major implications\nfor our own infrastructure team, and given that many of our customers _do_ run in\nthe cloud, [we decided to stay in the cloud][yes-cloud].\n\n[no-cloud]: /blog/why-choose-bare-metal/\n[yes-cloud]: /2017/03/02/why-we-are-not-leaving-the-cloud/\n\n## Introducing Gitaly\n\nSo it was clear that the application needed to be redesigned, and a new service\nwould be introduced to handle all Git requests. We named it\n[Gitaly](https://gitlab.com/gitlab-org/gitaly).\n\n![Gitaly Architecture Diagram](https://about.gitlab.com/images/gitaly_arch.png){: .large.center}\n*\u003Csmall>The planned architecture at the project start\u003C/small>*\n\nAs the diagram shows, the new Git server would have a number of distinct clients.\nTo make sure the protocol for the server and its clients is well defined,\n[Protocol Buffers][protobuf] was used. The client calls are handled by\nleveraging [gRPC][grpc]. Combined, they allowed us to iteratively add RPCs and\nmove away from NFS, in favor of an HTTP boundary. With the technologies chosen,\nthe migration started. 
The ultimate goal: v1.0, meaning no disk access was\nrequired to the Git storage nodes for [GitLab.com](https://gitlab.com).\n\nShipping such an architectural change should not influence the performance, nor\nthe stability of the self-managed installations of GitLab, so for each RPC a [feature\nflag](https://docs.gitlab.com/ee/development/feature_flags/index.html) gated the use of it. When the RPC had gone through a series of tests on both\ncorrectness and performance impact, the gate was removed. To determine stability we used\n[Prometheus](https://docs.gitlab.com/ee/administration/monitoring/prometheus/) for monitoring and the ELK stack for sifting through massive numbers of structured log messages.\n\nThe server was written in Go, while the application is a large Rails monolith.\nRails had a great amount of code that was still very valuable. This code got\nextracted to the `lib/gitlab/git` directory, allowing easier vendoring. The idea\nwas to start a sidecar next to the Go server, reusing the old code. About once a week the\ncode would be re-vendored. This allowed Ruby developers on other teams to\nwrite code once, and ship it. Bonus points could be earned if [the boilerplate code][gitaly-ruby]\nwas written to call the same function in Ruby!\n\n[protobuf]: https://developers.google.com/protocol-buffers/\n[gitaly-ruby]: https://gitlab.com/gitlab-org/gitaly/blob/232c26309a8e9bef61262ccd04a8f0ba75e13d73/doc/beginners_guide.md#gitaly-ruby-boilerplate\n[grpc]: https://grpc.io/\n\nThe new service wasn't all sunshine and rainbows though, at times it felt like\nthe improved visibility was hurting our ability to ship. For example, it became\nclear that the illusion of an attached disk created\n[N + 1 problems][rails-eager-loading]. 
And even though this is a well-known problem\nin Ruby on Rails, the tools to combat it are all tailored toward using it with\nActiveRecord, Rails' ORM.\n\n[rails-eager-loading]:https://guides.rubyonrails.org/active_record_querying.html#eager-loading-associations\n\n## Nearing v1.0\n\nWith each RPC introduced, v1.0 was getting closer and closer. But how could we be\nsure everything was migrated before unmounting all NFS mount points? A trip\nswitch got introduced, guarding the details required to get to the full path of each\nrepository. Without this data there was no way to execute any Git operation\nthrough NFS. Luckily, the trip switch never went off, so now it was clear NFS\nwasn't being used. The next step was unmounting on our staging environment! Again, this was very\nuneventful. Leaving the volumes unmounted for a full week, and not seeing any\nindication of unexpected errors, the logical next step was our production instance.\n\nDays later we started rolling out these changes to production: first the\nbackground workers were unmounted, than we moved onto higher impact services. At\nthe end of the day, all drives were unmounted without customer impact.\n\n## What's next?\n\nSo, where is this v1.0 tag? We didn't tag it, and I don't think we will. v1.0 is\na state for our Git infrastructure, and a goal for the team, rather than the code base.\nThat being said, the next mental goal is allowing all customers to run without NFS.\nAt the time of writing, some features like administrative tasks, aren't using Gitaly just\nyet. These are slated for [v1.1][gitaly-11], and our next objective.\n\nWant to know more about our Gitaly journey? 
Read about [how we're making your Git data highly available with Praefect](/blog/high-availability-git-storage-with-praefect/) and [how a fix in Go 1.9 sped up our Gitaly service by 30x](/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/).\n{: .alert .alert-info .text-center}\n\n[gitaly-11]: https://gitlab.com/groups/gitlab-org/-/epics/288\n\nPhoto by [Jason Hafso](https://unsplash.com/photos/8Sjcc4vExpg) on Unsplash\n{: .note}\n",[9,1297,1295],{"slug":6209,"featured":6,"template":680},"the-road-to-gitaly-1-0","content:en-us:blog:the-road-to-gitaly-1-0.yml","The Road To Gitaly 1 0","en-us/blog/the-road-to-gitaly-1-0.yml","en-us/blog/the-road-to-gitaly-1-0",{"_path":6215,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6216,"content":6222,"config":6227,"_id":6229,"_type":14,"title":6230,"_source":16,"_file":6231,"_stem":6232,"_extension":19},"/en-us/blog/the-security-tightrope",{"title":6217,"description":6218,"ogTitle":6217,"ogDescription":6218,"noIndex":6,"ogImage":6219,"ogUrl":6220,"ogSiteName":667,"ogType":668,"canonicalUrls":6220,"schema":6221},"The security tightrope: balancing security with ease-of-use","How do you balance user experience with the friction that’s introduced when trying to keep something secure?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680866/Blog/Hero%20Images/architecture-boulder-city-cityscape-220759.jpg","https://about.gitlab.com/blog/the-security-tightrope","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The security tightrope: balancing security with ease-of-use\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2019-11-07\",\n      }",{"title":6217,"description":6218,"authors":6223,"heroImage":6219,"date":6224,"body":6225,"category":698,"tags":6226},[1010],"2019-11-07","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\nWe sat down with GitLab security 
engineer Shawn Sichak to talk about the challenging act of balancing user experience (convenience!) with the friction that’s introduced when trying to keep something secure. \n\n![Shawn Sichak Headshot](https://about.gitlab.com/images/blogimages/ssichakbw.png){: .small.right.wrap-text} **Name:** Shawn Sichak\n\n**Title:** Security Engineer, Security Operations\n\n**How long have you been at GitLab?**: I joined October 2018\n\n**GitLab handle:** [@ssichak](https://gitlab.com/ssichak)\n\n**Connect with Shawn:** [LinkedIn](https://www.linkedin.com/in/shawnsichak) / [Twitter](https://twitter.com/shawnsichak/)\n\n\n\n#### Tell us what you do here at GitLab:\nAs part of the Security Operations team, I’m involved in events ranging from incident response and log analysis, to the development of tooling and automation to help contribute to and improve the security of the GitLab products and GitLab.com services.\n\n#### What’s the most challenging or rewarding aspect of your role? \nThere is a balancing act in security between user experience (convenience!) and the friction introduced while trying to keep something secure. Friction creates drag, and drag slows progress. You want to help keep people and the company as secure as possible without unnecessarily getting in their way or slowing down their work.\n\nI find that challenge incredibly interesting. When you are able to develop automation or other methods of enabling people to do the right (secure) thing by default, it’s a very rewarding feeling.\n\n#### And, what are the top 2-3 initiatives you’re currently focused on? 
\nSimilar to my colleague and fellow security engineer [Jayson Salazar's response](https://about.gitlab.com/2019/09/13/the-cloud-native-all-remote-security-challenge/), I’ve been working on developing and implementing new ideas and tooling around helping our team gain deeper visibility into more of the domains here at GitLab.\n\nWe are moving towards a more proactive approach to security response, where automation can help us perform actions in a consistent and repeatable manner, helping the Security team scale. We are laying the groundwork now for much bigger things to come by aggregating, analyzing, and alerting on many diverse data sources so that the outputs can then be fed into further automated response pipelines.\n\n#### What is the most significant piece of security advice you could provide to a colleague or friend? \nIt’s pretty simple and common advice (so common that [Paul](/blog/ask-gitlab-security-paul-harrison/), [Alexander](/blog/ask-gitlab-security-alexander-dietrich/) and [Alex](https://medium.com/gitlab-magazine/how-we-use-automation-to-scale-up-security-at-gitlab-f8440574e0e4) cite it as their go-to security advice), but not heeded often enough:  utilize unique passwords per service and set up a password manager to help generate, store, and access them as needed. Also, enabling two-factor authentication (2FA) everywhere it is available. Every additional step you take to make it more difficult for an attacker will significantly decrease the chance of your accounts being compromised.\n\nBut I’d also recommend giving Bruce Schneier’s excellent [article](https://www.schneier.com/blog/archives/2008/03/the_security_mi_1.html) on ‘The Security Mindset’ a read. While the goal isn’t to give everyone a cynical view of the world, I think understanding the mindset and thought process from an attacker’s perspective can be incredibly beneficial while trying to keep yourself (and others!) 
secure.\n\n#### What is the most important emerging trend you see in security?  \nIt is refreshing to see security become less of a walled garden and more incorporated into other areas of software and systems development. Taking development techniques and best practices from software/systems engineering and integrating them into security workflows has already been producing some exciting new tooling and ideas (SOAR, [compliance-as-code](https://about.gitlab.com/2019/08/19/get-started-compliance-as-code/), etc).\n\nI think continued advancement in areas that better enable security teams to “scale” are going to be incredibly important. Whether that be through the use of automation or more actionable data; security teams are going to need to be creative to keep up with the pace of change/development and the ever growing amount of data to analyze.\n\n#### From the perspective of your role, what’s GitLab doing better than anyone else in terms of security? \n[Transparency](https://handbook.gitlab.com/handbook/values/#transparency) - it’s something not given much consideration when it comes to security in most organizations. Being transparent about issues and vulnerabilities (while still protecting our customers and services) allows the wider community visibility into how we handle security internally, but also enables contribution and promotes collaboration; ultimately strengthening our security.\n\nThere are obvious exceptions to the rule and not everything can be public, but I think that transparency in security is something that we as an industry should strive to do a better job at.\n\n#### Is there an area of security research you think deserves more attention? Why? \nI find the field of security in Industrial Control Systems fascinating. 
The intersection between cyber and physical systems presents its own set of unique challenges, and the stakes are so high, ranging from the integrity of public utilities to telecommunications entities.\n\n#### In the past decade, how has your area of expertise changed? \nLooking back - it’s been an interesting ride.\n\nI remember coming out of school still unsure if I wanted to pursue a career in hardware or software. I eventually narrowed the job search to two offers - designing robotics for a bottling facility or a software engineering position in telecommunications. Went the software path and never really looked back.\n\nSince then, I’ve moved from development to systems work to research, eventually settling in security which allows me the opportunity to work on a little bit of everything!\n\n## Now, for the questions you *really* want to have answered:\n\n\n\n#### VIM or EMACS? \nVIM - as they say, EMACS is a great operating system, lacking only a decent editor.\n\n#### You get one superpower, what is it? \nThe ability to always pick the fastest checkout line at the grocery store. I currently possess the opposite power.\n\n#### Is a hotdog a sandwich? \nI don’t like to talk about religion.\n\n#### You need pancakes. IHOP or local pancake shop? \nI really appreciate how accurately the first statement of this question describes most of my life. 
With that being said, mom and pop shop first, but I support all pancakes.\n\n\nPhoto by [Pixabay](https://www.pexels.com/@pixabay) from [Pexels](https://www.pexels.com).\n{: .note}",[720,9,720],{"slug":6228,"featured":6,"template":680},"the-security-tightrope","content:en-us:blog:the-security-tightrope.yml","The Security Tightrope","en-us/blog/the-security-tightrope.yml","en-us/blog/the-security-tightrope",{"_path":6234,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6235,"content":6241,"config":6246,"_id":6248,"_type":14,"title":6249,"_source":16,"_file":6250,"_stem":6251,"_extension":19},"/en-us/blog/the-sky-is-not-falling",{"title":6236,"description":6237,"ogTitle":6236,"ogDescription":6237,"noIndex":6,"ogImage":6238,"ogUrl":6239,"ogSiteName":667,"ogType":668,"canonicalUrls":6239,"schema":6240},"The sky is not falling","Tips to avoid the FUD and protect yourself online.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679692/Blog/Hero%20Images/dawn-idyllic-ocean-464344.jpg","https://about.gitlab.com/blog/the-sky-is-not-falling","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The sky is not falling\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2019-10-17\",\n      }",{"title":6236,"description":6237,"authors":6242,"heroImage":6238,"date":6243,"body":6244,"category":698,"tags":6245},[1010],"2019-10-17","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIn honor of Security Awareness month...which, in our opinion, should be a year-round thing, we’ve pulled together some of our GitLab security team members' best security advice to keep us all a little safer online.  You might just see a pattern…\n\n### Advice: Look at security holistically. \n**It’s not just about securing the infrastructure, or the code, but also the people within the company. 
Security is everyone’s responsibility and effective security enablement and training go a long way.**\n\n![Rob Mitchell](https://about.gitlab.com/images/blogimages/rmitchell_scale.png){: .small.right.wrap-text}\n> Human error is the most significant cause of security problems. So many of the security breaches that have come to pass in recent years inevitably have an element where a person with good intentions has made a decision with dire consequences. So when thinking about Security, don’t just think about the cool hack or the clever technology. Most likely, the vulnerability will be a person who will make the mistake that causes a breach, so everything you can do to educate, inform and remove the potential for the human side of a system to fail will make the greatest difference. – **[Rob Mitchell, manager, Strategic Security](https://gitlab.com/gitlab-rmitchell)**  \n*Read more of Robert’s viewpoints in this blog post:[\"The difference transparency makes in security\"](/blog/the-difference-transparency-makes-in-security/).*\n\nThe saying “if something seems too good to be true” still rings true today, as much as it ever has. Be alert for things that are unexpected or seem odd. Malicious intent can hide in emails, websites, links, and even in your social media feed. Arming yourself with education and tools makes you much less susceptible to scammers. \n\n![Nicole Schwartz](https://about.gitlab.com/images/blogimages/nicoleschwartz_B_scale.png){: .small.right.wrap-text}\n> Everyday your attention is spread out over a multitude of things you need to accomplish, and in many cases that means you try and complete things like reading your email or social feeds quickly, and perhaps you multitask. This is a common situation, but malicious things can sneak past us most easily when our attention is divided. 
On a daily basis you could be exposed to [phishing scams](https://www.csoonline.com/article/2117843/what-is-phishing-how-this-cyber-attack-works-and-how-to-prevent-it.html), malicious links, articles written specifically to spark Fear Uncertainty and Doubt ([FUD](https://en.wikipedia.org/wiki/Fear,_uncertainty,_and_doubt)) to drive an emotional response, or plain old hoaxes. A lot of these scams are designed by people using [social engineering](https://www.webroot.com/us/en/resources/tips-articles/what-is-social-engineering), to target individuals. They use a bit of technology and a bit of acquired information to manipulate an individual into providing account credentials or access information. Luckily there are many tools out there that you can use to double check things that feel a little off. You can learn about [phishing scams](https://www.phishingbox.com/phishing-test) and [how to spot them](https://www.eset.com/ca/cybertraining/), verify [stories on Snopes](https://www.snopes.com/) or [data points on Wikipedia](https://en.wikipedia.org/wiki/Main_Page) that don’t check out, and for link checking, you can [expand links](https://urlex.org/) or [scan urls](https://scanurl.net/) to confirm that where you’re headed online is safe. And, if you want to be extra sure you’re directed to your bank or other account’s actual website, don’t click the link in the email, just type in the url directly or search to find it find from your favorite, trusted browser. – **[Nicole Schwartz, product manager, Secure](https://gitlab.com/NicoleSchwartz)**\n\nAnother great resource? Your company’s security team. A good security team would rather check that email you suspect might be a phishing attack rather than having someone fall for an attack. 
We map out how to [identify a phishing attack in our handbook](/handbook/security/security-assurance/governance/phishing.html#how-to-identify-a-basic-phishing-attack) and guide our employees on next steps if they suspect they’ve received one.\n\n### Advice: Make strong, unique passwords, use a password manager and consider adding two-factor authentication (2FA).  \n**This one is so important, we’re going to tell you twice.**\n\n![Paul Harrison](https://about.gitlab.com/images/blogimages/Pharrison_BW_scale.png){: .small.right.wrap-text}\n> Please, please, please, please use a password manager like 1Password, or LastPass, or Bitwarden (examples, not endorsements, YMMV and pick what fits your workflow best!) and start using it to generate and save unique and difficult passwords for each of your sites or services. You won’t need to remember them and so you don’t need to use a memorable one. Then, while you’re at it, turn on 2FA, and not that SMS/text message-based one. Use an app like Google Authenticator or Microsoft Authenticator, which will give you the six-digit number (aka [Time-Based, One-Time Password](https://en.wikipedia.org/wiki/Time-based_One-time_Password_algorithm)) on your mobile device, or better. Having strong, unique passwords and 2FA enabled will significantly decrease the chance of your accounts being compromised. – **[Paul Harrison, security manager, security operations](https://gitlab.com/pharrison)**   \n*Read more of Paul’s viewpoints in this [\"Ask GitLab Security\" blog post](/blog/ask-gitlab-security-paul-harrison/)*.\n\n\u003Cbr/>\n\n![Alex Groleau](https://about.gitlab.com/images/blogimages/Groleau_BW_scale.png){: .small.right.wrap-text}\n> Use a different password, preferably a completely random one, and two-factor authentication, for every website you visit. Use whatever form of password manager to keep them all straight. I have used 1Password for years. 
Websites are hacked daily and there is a high chance that one of the websites you have an account at was hacked within the last year. If your passwords are all the same, you are likely compromised as you read this sentence. – **[Alex Groleau, senior security engineer, Automation](https://gitlab.com/agroleau)**   \n*Read more of Alex’s viewpoints in this blog post, [How we use automation to scale up security at GitLab](https://medium.com/gitlab-magazine/how-we-use-automation-to-scale-up-security-at-gitlab-f8440574e0e4)*. \n\nBasically, you’re going to want to use a strong, unique password on every site and service you use online. And, if you don’t already have [2FA](https://en.wikipedia.org/wiki/Multi-factor_authentication), or the method of logging in with both information you know (username and password) and something you have (yubikey, authenticator app) and/or something you are (biometrics), enabled, you may want to do that.  See [Two Factor Auth](https://twofactorauth.org/) for a list of sites where 2FA is an option. Lastly, you may want to check out [haveibeenpwned.com](https://haveibeenpwned.com/), where you can see if your email address(s) or usernames have already been compromised.\n\n### Advice: Keep your systems updated and patch, patch, patch.\n\n![Alexander Dietrich](https://about.gitlab.com/images/blogimages/A_dietrich_BW_scale.png){: .small.right.wrap-text}\n> Use a password manager and generate unique passwords for everything. That way one website losing your data will not put all your other accounts at risk. Keep your systems updated, so you don’t get bitten by security holes that are years old. Ok, that was two pieces of advice. 
– **[Alexander Dietrich, senior security engineer, Automation](https://gitlab.com/adietrich)**   \n*Read more of Alexander’s viewpoints in this [\"Ask GitLab Security\" blog post](/blog/ask-gitlab-security-alexander-dietrich/)*.\n\nYes, okay, we slipped another recommendation for password management in there...but when something is so important (and simple to implement), it bears repeating. \n\n![Mark Loveless](https://about.gitlab.com/images/blogimages/mloveless_BW.png){: .small.right.wrap-text}\n> Patch. Attackers will take advantage of security flaws to gain access to systems and devices, so make sure you install the latest patches. Most operating systems allow you to set them up to download and install patches and updates automatically, so you should do this. The same should apply to various applications - for example many web browsers can be set to download and install updates and upgrades. Software vendors frequently release patches and various upgrades, and these often contain security fixes. While less common, some vendors in the past have released “silent” security patches where it seems like a regular update but a security patch is slipped in without public notification. It is possible that a fix for a crash or some other flaw might have some security ramifications that the vendor is unaware they’ve actually corrected. So always patch. – **[Mark Loveless, senior security engineer, Security Research](https://gitlab.com/mloveless)**   \n*See Mark’s ongoing [Zero Trust blog series](/blog/tags.html#zero-trust)*.\n\n### Advice: Avoid the FUD and adopt simple, secure practices into your everyday life. \n\n> Do not live and die by the headlines surrounding some evil hackers performing weird and mysterious digital sleight-of-hand and bringing destruction to all of humanity. 
The headlines are intended to not only get you to read the article but go to the online news site and generate revenue via ad impressions for the news site’s advertising, so they are often rather sensational. Yes you should patch and use strong unique passwords and multi-factor authentication. This fixes most problems. – **[Mark Loveless](https://gitlab.com/mloveless)**\n\n\u003Cbr/>\nYou don’t have to be an industry-trained security expert to operate more safely and securely online. It comes down to some basic principles and incorporating more secure practices into your everyday life. Is everything you need to know included in the list above? No way.  But, you can learn more about our [security best practices in our handbook](/handbook/security/).  \n\n**Have a suggestion or tip that we missed? Please share so our community can benefit, and together we can grow more secure.** \n\nPhoto by [Francesco Ungaro](https://www.pexels.com/@francesco-ungaro?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels) on [Pexels](https://www.pexels.com/photo/dawn-dusk-idyllic-ocean-464344/?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels)\n{: .note}\n",[9,720],{"slug":6247,"featured":6,"template":680},"the-sky-is-not-falling","content:en-us:blog:the-sky-is-not-falling.yml","The Sky Is Not Falling","en-us/blog/the-sky-is-not-falling.yml","en-us/blog/the-sky-is-not-falling",{"_path":6253,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6254,"content":6260,"config":6265,"_id":6267,"_type":14,"title":6268,"_source":16,"_file":6269,"_stem":6270,"_extension":19},"/en-us/blog/the-trouble-with-technical-interviews",{"title":6255,"description":6256,"ogTitle":6255,"ogDescription":6256,"noIndex":6,"ogImage":6257,"ogUrl":6258,"ogSiteName":667,"ogType":668,"canonicalUrls":6258,"schema":6259},"The trouble with technical interviews? They aren't like the job you're interviewing for","Forget the coding exercise. 
Here's how to create realistic scenarios for engineering candidates in technical interviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681148/Blog/Hero%20Images/nycbrooklyn.jpg","https://about.gitlab.com/blog/the-trouble-with-technical-interviews","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The trouble with technical interviews? They aren't like the job you're interviewing for\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-03-19\",\n      }",{"title":6255,"description":6256,"authors":6261,"heroImage":6257,"date":6262,"body":6263,"category":743,"tags":6264},[672],"2020-03-19","\n\nInterviewing for an engineering job in the tech world can mean [you’ll be asked all sorts of questions](https://stackify.com/devops-interview-questions/). Sometimes, the job interview questions can be pretty straightforward: “Tell me about a time that you have implemented an effective monitoring solution for a production system.” Other times, the questions are impossible to answer and designed to spark your creativity: “How many windows are in New York City?” After passing the initial interview, the applicant or candidate graduates to the next tier of interviewing: The often-dreaded technical interview.\n\n## What is a technical interview?\n\nA technical interview is one that is conducted to gauge a candidate’s skill level for positions in the information technology, engineering, and science fields. It may also determine how much a candidate knows in more niche areas of a company, such as marketing, sales, and HR.\n\n## How to prepare for a technical Interviews\n\nProspective engineers often face a challenge when it comes to preparing for the technical interview, largely because there is no playbook for how companies set them up technical. 
It’s unclear whether to prepare by memorizing many different topics, or focusing on specific projects. Is it better to practice with a computer or a peer engineer? There are an overwhelming number of resources available online, but with little clarity as to what the standard is for a technical interview and little guidance from the company on what to expect, most of the time engineers start technical interviews in the dark.\n\nInconsistencies in the technical interview process isn’t just a job candidate problem. In fact, many companies struggle to set up a technical interview process that is effective, equitable, and allows the hiring manager to compare candidates. The problem with technical interviewing compounds when a company is experiencing rapid growth.\n\n## What are the challenges of conducting technical interviews at a growing company\n\n\"Imagine you had a hiring target of doubling your team size and all your interviews are conducted remotely. Welcome to GitLab,\" says Clement Ho, [frontend engineering manager on the Monitor: Health team](/company/team/#ClemMakesApps) at GitLab.\n\n![Hiring chart shows GitLab more than doubled the number of hires from around 400 in 2019 to roughly 1300 by end of 2020](https://about.gitlab.com/images/blogimages/fei_hiringchart.jpg){: .shadow.medium.center}\n\nGitLab more than doubled the number of hires from around 400 in 2019 to roughly 1300 by end of 2020.\n{: .note.text-center}\n\nWe identifed three core challenges with orchestrating technical interviews as GitLab grows.\n\n1. We didn't have enough interviewers for the pipeline of candidates.\n2. Our technical interviewing process was inconsistent and even a little biased.\n3. It was difficult to measure whether or not we were raising the bar.\n\n\"And by raising the bar, I mean making sure each candidate that joins the team makes the team better,\" says Clement.\n\nThese problems are by no means unique to GitLab. 
Any engineering company that is scaling rapidly will encounter some growing pains when it comes to hiring, and many will end up falling back on some of the typical models for conducting technical interviews.\n\n## The typical technical interview methods\n\nDuring his talk, [\"Using GitLab to Power our Frontend Technical Interviews\" at GitLab Commit San Francisco](https://www.youtube.com/watch?v=jSbCt8b_4ug), Clement explained the four different techniques that are often employed in technical interviews. Each method comes with advantages and disadvantages from the perspective of the hiring manager.\n\n## What are good technical interview questions \n\nA good technical interview needs to be about more than practical skills – it’s about the whole package.A candidate should possess the ideal coding skills but also be a team and culture fit and be able to discuss developer topics efficiently. A technical interview should include both situational interview questions and a skills assessment to discern a candidate’s potential.\n\nThe types of questions to ask can concern a candidate’s technical abilities and background, their career journey so far, and queries specific to the team or company.\n\n## Types of questions asked during a technical interview and their purpose\nEven though employers have already reviewed your resume and cover letter, they will want you to flesh that out during the interview to learn more about how you attained those skills. In order to assess your level of experience, they will likely also ask you to provide concrete examples from prior jobs.\nMake sure you are prepared—do your research on the company and the type of questions you may be potentially asked. This will help build your confidence level and reduce any nervousness you might feel. 
It’s also an opportunity for you to set yourself apart from other candidates by showcasing your knowledge and additional skills you can bring to the job.\n \nIt is important to be honest about your skill set because that is something employers value. You may find the company will be willing to hire someone who is transparent about the areas where they need to improve and where they’d like to gain more skills.\n\nExamples of common questions to expect in a technical interview:\n\n- What coding languages are you most familiar with?\n- What is your experience with Kubernetes with a specific example?\n- What’s the purpose of continuous integration in an automated build?\n- How have your previous technical roles prepared you for this job?\n- Tell me about a time when you received an unexpected assignment: how did you react, and what did the experience teach you?\n- Please provide more details about your educational background and how it prepared you for this position.\n- How did you go about teaching yourself a necessary technical skill while you were working on a project?\n- What are your strengths, and where do you think you need to improve your skills?\n- Do you have any technical certifications?\n- Please detail the work you did on the project you are most proud of.\n- What are your favorite and least favorite tech tools, and why?\n- What are the pros/cons of working in an agile environment?\n\n### Sample technical interview questions and answers\n \n- **How do you stay current with your technical knowledge and skills?** It’s a good idea to list online content you use to educate yourself, as well as tutorials and conferences you have attended to gain more knowledge. Perhaps you have also worked closely with vendors or attended sessions to learn about new product features.\n- **How do you troubleshoot technical problems?** Discuss the steps you take when you are answering a question. 
This will give employers a sense of how you problem-solve, and it provides a good overview of how well you understand the relevant concepts. Even if you don’t answer a question correctly, it will show the interviewer your process and reasoning, which are also important. You can mention resources you use, such as GitLab and Stack Exchange, as well as the developer community and any publications you read for advice.\n- **What is your level of experience with the software programs mentioned on your resume?** Describe how many years you have used the tools, your impressions of them, and bring up the companies you used them at, with specific examples.\n- **What programming language are you most proficient in?** You should discuss how you have become proficient in this language and why it is the one you are most comfortable using. You can also cite other languages you are familiar with.\n- **Describe a time you made an error and how you resolved it.** Don’t use an example of an egregious error since that may put you in a negative light. Be sure to emphasize that you took responsibility and acted with integrity, and did whatever it took to resolve the issue.\n\n## What are some soft skills and coding skills to highlight in a technical interview\n\nA technical interview assesses your technical expertise, coding skills, and ability to fit into a team. However, soft skills are just as important and often aid in the development of more technical skills – particularly in a team setting.\n\nAs the technical interview progresses, be prepared to tackle some questions about soft skills like:\n\n- **Communication skills:** How does the candidate contribute to group discussions, confront problems, or give and receive feedback?\n- **Organizational skills:** What are the ways in which the candidate provides visibility into their work processes and their methods of staying on task?\n- **Collaboration skills:** Are they interested in helping their teammates? 
What do they think are the keys to successfully navigating a team project? How have they collaborated on past projects?\n- **Creative problem solving:** How do they work through a problem in a project? Do they use both analytical and creative thinking to come up with solutions?\n\n### How to prepare for verbal technical questions\n\nThere are countless articles online that try to prepare job candidates for a verbal technical interview, but whether this method truly effective for evaluating the technical competency of a software engineer is debatable.\n\nIn the typical scenario, the interviewer asks the candidate to describe a technical concept and tries to measure their fluency in said concept based on the quality of the conversation.\n\nThe advantage of this method is that the interviewer can understand how the candidate communicates, which is of particular importance when the engineering team is all-remote, as is the case at GitLab. The drawback? Being a good communicator does not necessarily mean the candidate knows how to code effectively.\n\n\"So I've interviewed candidates that could talk the talk, but they couldn't really write the code,” says Clement. \"And that's not a great situation for an engineer to join GitLab.\" Clement’s team has moved away from using verbal technical questions as a method for evaluating candidates.\n\n### Live coding exercises\n\nOne of the more popular methods for evaluating engineers is through live coding. While it allows the evaluator to see how engineering candidates answer data structure questions, it also has its disadvantages.\n\nA key advantage of live coding data structures is that it offers a fairly consistent measurement and evaluation.\n\n\"I can talk to another manager or another interviewer and be able to communicate, 'Hey, this person wasn't able to do a linked list, they got stuck here. They weren't able to understand a runtime efficiency here.' 
So it's pretty consistent,\" says Clement.\n\nBut the ability to create data structures is not always the best indicator of ability. Oftentimes engineers with a very traditional background or recent graduates will shine here, but someone who is more senior and able to do a lot of great things, but is perhaps not as brushed up on data structures, may struggle.\n\nLive coding interviews probably aren’t going anywhere fast, but the pitfalls of this method are well documented by engineers and hiring managers. Brennan Moore, a product engineer in New York City, explains why he does not conduct live coding interviews when evaluating a prospective candidate:\n\n> \"Much like the SAT when applying for college, live coding is a structured test. I didn’t go to a school that trained me to do live coding, and so will probably fail the test. As I’ve experienced it, live coding isn’t the meritocratic space that it pretends to be. Live coding interviews weed out the people who are good at live coding interviews,\" says Brennan in his [blog post](https://www.zamiang.com/post/why-i-don-t-do-live-coding-interviews).\n\nAt GitLab, we found that live coding exercises don't accurately represent engineering capability. Oftentimes, a recent computer science graduate will outperform a more senior candidate with a lot of valuable experience. In summary, live coding exercises will often disadvantage more senior candidates, people who are nervous in high-pressure situations (read: everyone), and advantages more junior engineers or people who have practiced live coding.\n\n### Digital prompt\n\nA third common method for evaluating candidates is to ask the engineer to code a UI using an online editor while on screen share with the evaluator.\n\nThe advantage of this method is that it allows the evaluator to observe how a candidate builds. The drawbacks here are similar to those with live coding. 
First, the engineer is under pressure to build while the evaluator watches on, making it a nerve-wracking situation. The other drawbacks come from an evaluation perspective: It is challenging to measure the effectiveness of this method and it is hard to compare between candidates.\n\n### Take-home project\n\nAny engineer (or writer, for that matter) can tell you, the supplemental take-home project is a very common ask when going through an interview process. The advantage here for us is that this assignment closely mimics the reality of building environments while working remotely at GitLab.\n\nBut this task comes with major drawbacks, mainly that it disadvantages candidates who may not have the time or capacity to complete the project.\n\n\"... imagine a scenario where you're a single parent and you have kids; you may not have as much opportunity to take dedicated time, a couple of hours after work to really focus on a take-home project compared to someone from a more privileged background,\" says Clement. \"They might be able to dedicate and output something better.\"\n\n[Diversity and inclusion is a core value](/company/culture/inclusion/) for GitLab, and anything that disadvantages candidates from underrepresented groups is not inclusive, and therefore suboptimal for evaluating candidates based on their engineering abilities.\n\n## What are they looking for during a technical interview?\n\nCompanies want candidates who can discuss the industry in the context of the job they are applying for. Be prepared to discuss examples of your work. 
Many will want to hear about soft skills, too—your ability to communicate and collaborate and work with others to problem-solve issues.\n\nThey will also want to see how passionate and enthusiastic you are and whether you have the self-motivation to not only do the job but take the initiative to do more than what you’re tasked with.\n\nAlso, interviewers will want to see whether candidates have the desire to increase their technical knowledge.\n\n## What are some online preparation tools and resources for technical interviews\n\n- Indeed offers a career guide to [help prepare for](https://www.indeed.com/career-advice/interviewing/what-is-a-technical-interview) a technical interview.\n- Interview Kickstart has several [webinars](https://learn.interviewkickstart.com/) to help prepare engineers for interviews.\n- Udemy offers a course in [Technical Interview Skills](https://www.udemy.com/course/technical-interview-skills/?utm_source=bing&utm_medium=udemyads&utm_campaign=BG-DSA_Webindex_la.EN_cc.BE&utm_content=deal4584&utm_term=_._ag_1222657343651662_._ad__._kw_udemy_._de_c_._dm__._pl__._ti_dat-2328215871879260%3Aloc-190_._li_103429_._pd__._&matchtype=b&msclkid=9f5132d9c84c17b02f7951a4f46279d6).\n- [Codecademy](https://www.codecademy.com/learn/technical-interview-practice-python?utm_id=t_kwd-79027793284383:loc-190:ag_1264438993811076:cp_370314525:n_o:d_c&msclkid=550de1275d811b2cfc0f82592b6d9626&utm_source=bing&utm_medium=cpc&utm_campaign=US%20Language%3A%20Pro%20-%20Broad&utm_term=%2Btechnical%20%2Binterview%20%2Bprep&utm_content=technical%20interview%20practice) also offers a course called - Technical Interview Practice with Python.\n- Here are some more general [interview tips](https://www.roberthalf.com/blog/job-interview-tips/interview-tips-to-help-you-land-the-job-you-want) that are applicable to all candidates.\n\n## Meaningful questions to ask the interviewer\n\nCandidates will also be given a chance to ask questions they might have to learn more about the 
company. This is a great opportunity to gain more insight into how the company operates, what its philosophy is, and its vision for the long term.\n\nIt’s also a good way to glean how the company views its IT team. If you don’t ask questions, that could give the impression you are unprepared or not terribly interested in the job.\n\nQuestions to ask can include:\n\n- What does a typical day looks like in this role?\n- Are there opportunities for training and further advancement?\n- What software development methodology do you use?\n- What are your code review practices?\n- Do you have on-call rotations? If so, how long is one rotation?\n- What are the responsibilities of the person on call?\n- Please provide more details about the team I will be working with, such as how many people are there, what their roles are, what the hierarchy is, and what areas of improvement you would like to see on the team.\n\n## The new way\n\nWhile each method for conducting a technical interview comes with advantages, there are also numerous disadvantages when it comes to conducting an effective and measurable evaluation and creating an equitable interview process. Under the guidance of Clement, the [Monitor:Health team](/handbook/engineering/development/ops/monitor/respond/) decided to interview frontend engineers in an entirely new way using GitLab.\n\nNow let's take a deep dive into the nuts and bolts of reinventing the technical interview for frontend engineers at GitLab. Just wondering about the key takeaways? [Skip ahead](#why-this-new-model-for-technical-interviews-is-better). 
As we continue to iterate on a more effective and measurable technical interview process, we hope this inspires other engineering organizations to rethink theirs and share learnings with us.\n\nOur first step: Standardize the interview process.\n\n### Fixing an MR on a test project\n\nThe team standardized the interview process by creating an open source test project, called `project-seeder`, which seeds projects to different candidates using a GitLab Bot. Candidates are assigned a merge request to troubleshoot in the project created for the technical interview. The `project-seeder` is powered by the GitLab Bot so the interviewer doesn't have to worry about API keys, and works in four steps:\n\n1. Exports the template project\n2. Imports template project\n3. Adds users with expiration\n4. Triggers pipeline for candidate to review MR\n\nThe candidate is sent an email with a link to the MR the candidate is assigned to fix as part of the technical interview.\n\n### Standardize the evaluation rubric\n\nThe team also created a standardized rubric for how the candidate's performance on a technical interview is evaluated.\n\n\"We don't want to be in a situation where unconscious bias or bias of one candidate over another plays a part because of our preconceived notions,\" says Clement.\n\nCreating a rubric that looks at multiple categories allows the evaluator to look at the performance of the candidate from a more holistic perspective, as opposed to looking at a candidate's performance on one technology.\n\nThe team created a [Periscope dashboard](/handbook/engineering/frontend/interview-metrics/) to create a feedback loop between the candidates and evaluators to identify opportunities for improvement in the technical interviewing process.\n\n![Frontend team used Periscrope to collect feedback from candidates who participate in technical interviews](https://about.gitlab.com/images/blogimages/fei_periscopedashboard.jpg){: .shadow.medium.center}\n\nThe frontend engineering 
team used Periscope to collect feedback from candidates who participate in technical interviews.\n{: .note.text-center}\n\n## Demoing the technical interview\n\n### Inside the technical interview project\n\nClement created a sample project to demonstrate how we use GitLab to power our technical interviews.\n\nIn the [gl-commit-example](https://gitlab.com/gl-commit-example) group, there is a subgroup with all the interview projects we are seeding to the imaginary candidates, a template, and a project seeder.\n\n![A screenshot of the sample project shows the interview project's subgroup, template, and project seeder application](https://about.gitlab.com/images/blogimages/fei_interviewproject.jpg){: .shadow.medium.center}\n\nThe interview project's subgroup, template, and project seeder application lives inside the sample project for the technical interview.\n{: .note.text-center}\n\n[Inside the template](https://gitlab.com/gl-commit-example/template), there are GitLab pages and the [interview test merge request](https://gitlab.com/gl-commit-example/template/-/merge_requests/1).\n\nThe assignment here is pretty simple. 
The candidate needs to update the website to say \"Hello GitLab Commit SF,\" but in order to accomplish this, the candidate will need to fix the failing pipeline.\n\n### Powering project-seeder\n\nWe use variables from GitLab CI to configure the [project-seeder application](https://gitlab.com/gl-commit-example/project-seeder).\n\n![Screenshot of the project for the project-seeder application](https://about.gitlab.com/images/blogimages/fei_projseederapp.jpg){: .shadow.medium.center}\n\nInside the project-seeder application which seeds the interview projects to job candidates.\n{: .note.text-center}\n\n\"I'm creating `new-project-example-two`, and I'm adding this bot user that I created and the expiration, so I can just easily run this pipeline and it'll seed this project,\" says Clement.\n\n![We use variables from the GitLab CI to configure the project-seeder applications](https://about.gitlab.com/images/blogimages/fei_variables.jpg){: .shadow.medium.center}\n\nThe next step is to run the setup pipeline, which will create the project, import the project, export the project, and share it with the job candidate.\n\n![A look inside the pipeline that will create the test project](https://about.gitlab.com/images/blogimages/fei_insidethepipeline.jpg){: .shadow.medium.center}\nA look inside the pipeline that will create the test project.\n{: .note.text-center}\n\nLooking inside example-one, we can see there is a project and [broken MR](https://gitlab.com/gl-commit-example/interview-projects/example-1/-/merge_requests/1).\n\n\"And an example for a candidate – they would probably look at the CI and see, 'Oh there's a failing test. Let's see what that's about. Oh, it looks like it's checking for \"hello world\". 
So since we changed the message earlier, we can just change this and get this test passing and then pass this interview,'\" says Clement.\n\n## Why this new model for technical interviews is better\n\nThe new model surpasses the old model because we created realistic scenarios that reflect what it's like to actually work for GitLab, and we established a more consistent method of measurement.\n\n\"So we're able to get better candidates overall. Candidates that pass through this technical interview, we're sure that they're going to be successful at GitLab,\" says Clement.\n\nBy designing our technical interviews this way, we can ensure that the interview project matches our actual product architecture at GitLab, which in this case is Ruby on Rails for Vue JS.\n\nWe also struggled in the past with finding a good way to check that the candidate knows how to use Git, and can navigate pipelines and testing. By using GitLab for interviews, we're able to confirm a candidate's competency with Git implicitly by evaluating their performance on the technical interviews.\n\nWe wanted to mirror the actual experience of troubleshooting a broken MR while working at GitLab, so we allow our candidates to use the internet during their technical interview. This allows the evaluator to see how the candidate solves problems and see their resourcefulness.\n\n\"If you're already using GitLab for your tooling, you're just exposing them to what it's like to work at GitLab; it's a more accurate representation,\" says Clement. \"And you can also make sure you're measuring testing proficiency and you make sure they understand how that works before they join your company.\"\n\n## Four key takeaways from our technical interview update\n\nWhether or not a company uses GitLab, there are a few key lessons that we learned by iterating on how we conduct technical interviews for engineers.\n\n1. 
**Make technical interviews as much like real work as possible**: Nine times out of ten, an engineering manager isn't going to sit back and watch an engineer break a sweat in a live coding exercise, any more than they will watch on as an engineer builds in UI. Create realistic scenarios based on the actual work and evaluate based on the candidate's performance.\n\n2. **Make any technical interview process \"open-book\"**: Engineering doesn't involve much rote memorization. Instead, allow the engineering candidate to use the internet (and in our case, the [GitLab Handbook](/handbook/)) to look up their questions. It's better to see how a candidate applies their knowledge and troubleshoots the inevitable problems that may arise. This change will likely improve your candidate experience too.\n\n3. **Standardize your rubric**: However the technical interview is done, make sure that the rubric is as objective as possible and that the candidate is evaluated based on various criteria, not on their familiarity with a particular technology. A strong rubric means a stronger, more valid method for evaluating candidate performance.\n\n4. **Create an inclusive process**: Think critically about how the technical interviewing process and evaluation is structured so a diverse group of candidates can be recruited and evaluated based on their merits. When in doubt, ask a diversity, inclusion and belonging expert or turn to your human resources team for advice. Still coming up empty? Hire a diversity consultant; it will be worth it.\n\n**Interviewing at GitLab?** We encourage you to use the resources GitLab creates during your technical interview. 
We don't publish our evaluation criteria publicly, but we do have the [Periscope dashboard](/handbook/engineering/frontend/interview-metrics/) which can provide some insight.\n\nWatch Clement's talk from [GitLab Commit San Francisco](https://www.youtube.com/watch?v=jSbCt8b_4ug) to learn more about how we used GitLab to power our technical interviewing process.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/jSbCt8b_4ug\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[3138,9],{"slug":6266,"featured":6,"template":680},"the-trouble-with-technical-interviews","content:en-us:blog:the-trouble-with-technical-interviews.yml","The Trouble With Technical Interviews","en-us/blog/the-trouble-with-technical-interviews.yml","en-us/blog/the-trouble-with-technical-interviews",{"_path":6272,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6273,"content":6279,"config":6284,"_id":6286,"_type":14,"title":6287,"_source":16,"_file":6288,"_stem":6289,"_extension":19},"/en-us/blog/thelastmile-gitlab",{"title":6274,"description":6275,"ogTitle":6274,"ogDescription":6275,"noIndex":6,"ogImage":6276,"ogUrl":6277,"ogSiteName":667,"ogType":668,"canonicalUrls":6277,"schema":6278},"Inside the collaboration between GitLab and The Last Mile","GitLab teamed up with The Last Mile to bring open source DevOps and tech mentorship to incarcerated populations across the United States.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681743/Blog/Hero%20Images/tlm-blogpost-banner.png","https://about.gitlab.com/blog/thelastmile-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the collaboration between GitLab and The Last Mile\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2020-11-13\",\n  
    }",{"title":6274,"description":6275,"authors":6280,"heroImage":6276,"date":6281,"body":6282,"category":1517,"tags":6283},[3305],"2020-11-13","\n\n[The Last Mile (TLM)](https://thelastmile.org/), an organization focused on changing lives through technology, is tackling the daunting problem of mass incarceration in the United States by providing education and career training opportunities to incarcerated individuals to help break the generational cycle of incarceration. GitLab team members with similar passions and ideas connected with The Last Mile team and built a partnership to help bring the tech industry and mentorship directly to incarcerated individuals.\n\n## AMA to Coffee Chat to Partnership\n\nThe idea for TLM partnership originated during an AMA (or \"Ask Me Anything\" session) between GitLab CEO, [Sid Sijbrandij](/company/team/#sytses), and GitLab team members. [In one of these AMAs](https://www.youtube.com/watch?v=qi9zrymBO8o), [Tucker Logan](/company/team/#tuckcodes), a federal solutions architect at GitLab, asked Sid about the inspiration behind his [tweet](https://twitter.com/sytses/status/1227319454817804288) about mass incarceration. In a follow-up question, [Morgen Smith](/company/team/#msmith6), a sales development representative (SDR) for the Americas, asked Sid if GitLab would consider creating initiatives to help combat the school-to-prison pipeline.\n\nAs a former educator, Morgen has witnessed first-hand the national trend of disadvantaged youth being agressively disciplined in schools, which can then lead to juvenile offenses and later to formal charges. 
During the AMA, Morgen asked Sid: \"What do you think GitLab could do to encourage minority youth in this situation to be inspired by opportunities in tech?\" Sid shared his support and passion for the topic, and invited Morgen and Tyler to host an [open coffee chat](/company/culture/all-remote/informal-communication/#coffee-chats) on the topic to brainstorm ideas and next steps.\n\nDuring the coffee chat, Sid decided to take the smallest step, first. He visited San Quentin State Prison in San Rafael, Calif., and organized a call with Chris Redlitz, a co-founder of TLM. It turns out that TLM was using GitLab internally and also using the GitLab Community Edition to train nearly 300 students participating in their programs about how to use DevOps.\n\nTLM is a nonprofit program that started at San Quentin. TLM works with the incarcerated populations at men’s, women’s, and young adult correctional facilities to help them build relevant skills in technology with the goal of preparing individuals for successful reentry and building careers in business and technology. Today, TLM is in 23 classrooms across six states and has served 622 students since its inception.\n\n## TLM students learn DevOps with GitLab\n\nParticipants in TLM use the self-managed, free open core version of GitLab in their courses on Web Development. Each of the twenty individual classrooms have their own self-managed instance which around 20 students use to create and host their own private repositories. The sandbox environments are deployed centrally via Google Cloud. The core curriculum includes HTML/CSS and JavaScript, Node.js, Express.js, React.js, and Mongodb. GitLab is used primarily as a [source code management tool](/solutions/source-code-management/) for the students. Students write and commit code to personal repositories during course assignments. 
TLM Remote Instruction team also manages student-facing GitLab repositories to demonstrate industry best practices in merging, code collaboration, and version control platforms. Additionally, TLM leverages GitLab by providing students access to their repositories after they are released from prison, preserving commit history and all version control for the aspiring coders.\n\n\"By utilizing GitLab, The Last Mile students become comfortable using a best-in-class open source DevOps tool,\" says Tulio Cardozo, IT Manager, TLM. \"This experience empowers our students as aspiring software engineers, enabling them to enter the workforce with the collaboration and communication framework skills employers demand.\"\n\nThe GitLab team is partnering with the TLM Programs department to organize a series of webinars and workshops for the students. The first webinar kicked off in June of 2020 and was broadcast to 27 students (men, women, and youth programs), across four classrooms in several states. The topic was an introduction to GitLab and DevOps. Sid joined and shared the story of founding GitLab and his journey in tech. [Brendan O’Leary](/company/team/#brendan), a senior developer evangelist at GitLab, provided an overview of DevOps and explained how GitLab is the first single application for the entire DevOps lifecycle.\n\n\"The students appreciated the information on how to get started as new developers. Sid and Brendan helped the students believe they could accomplish anything with enough hard work,\" says a classroom facilitator from the Pendleton Youth Correctional Facility in Indiana.\n\nThe TLM team added that the webinar exposed students to a large company that works remotely and introduced them to an industry-recognized brand that the students use. 
In addition to the value of the content itself, there was a Q&A portion of the session where the studetns asked questions about the technology itself, such as how to start an open-source project and protecting intellectual property in open source, and about the facilitators' personal journey into tech.\n\nWatch the webinar with GitLab and TLM below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/ejHmvMjXJVU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIn addition to the general workshop, the teams also collaborated on more technical content. The students at the Pendleton Juvenile Correctional Facility had a very special guest visit their [Web Development Fundamentals Course](https://thelastmile.org/our-work/), [Natalia Tepluhina](/company/team/#ntepluhina). Natalia, who currently lives in the Ukraine, is a frontend engineer at GitLab and also serves as a [core Vue.js team member](https://vuejs.org/v2/guide/team.html) and [core team member](/community/core-team/) of GitLab itself. Natalia answered a variety of questions about how to approach learning Javascript and provided a few demos related to specific questions from the students.\n\n## Mentorship for a career in DevOps\n\nGitLab and TLM also partnered on a series of Technical recruiting workshops with the classrooms. These have definitely been one of the highlights of the partnership thus far. In these workshops, a GitLab recruiter gave a presentation on the technical recruiting processes at GitLab, best practices during the application process and interview process, as well as an overview of what to expect during an interview. 
During each of the four sessions, the recruiters directly engaged with the participants, who asked a variety of questions, including:\n\n* How do I address incarceration on my resume?\n* What about background checks?\n* How do I gain professional experience while incarcerated?\n\nThe GitLab recruiting team was very sensitive to the participants' concerns and provided honest, clear answers, and great suggestions. The recruiters shared that during the process candidates should think of their recruiter as a resource, and they can always ask to speak to the People team at GitLab in confidence if it would help reassure them with any concerns they have regarding their criminal records. The recruiters encouraged the students to highlight their work in TLM courses on their resume and think about whether they can use course projects to start to build a portfolio. In addition, the facilitators encouraged participants to think about contributing to open source projects as a way to build technical skills, increase their network and mentorship opportunities.\n\n## How can open source help incarcerated populations gain experience in tech?\n\nThe discussion around contributing to open source projects as a way to build technical skills sparked a few different exciting ideas with the teams. One of these ideas was to hold a first time contributor workshop with alumni from TLM. The workshop was held in September 2020 had 16 alumni participants, four GitLab team members, including Sid, and five TLM team members. The workshop covered the basics on how to contribute to GitLab and demonstrated the step-by-step process. Participants were [provided an issue](https://gitlab.com/gitlab-org/gitlab/-/issues/247284) with a list of simple fixes with the label [\"good-for-new-contributors\"](https://gitlab.com/groups/gitlab-org/-/labels?utf8=%E2%9C%93&subscribed=&search=good+for+new+contributors) in the GitLab docs or handbook with typos or other minor changes. 
We had a few merge requests after just a few hours of the workshop! Participants were encouraged to tag GitLab team members for recognition and to win a pair of tanuki socks – by the end of the week we had given away six pairs of socks.\n\nParticipants and instructors appreciated the opportunity to learn in a hands-on way during the workshop:\n\n\"Thank you for the opportunity to participate in the GitLab workshop. I am so grateful to the GitLab staff for taking the time to introduce those of us who are new to GitLab to the history and functionality of the company. I learned so much, not just about how I can utilize GitLab to accomplish personal tasks more efficiently, but also how I can contribute and collaborate more with others and contribute to my local and global communities.\" - TLM staff and alumna.\n\nThe GitLab team found the experience equally rewarding. \"Working with The Last Mile was such a rewarding experience! When I think about how our product takes in contributions from all over the world and knowing it is also leveraged by those currently and or previously incarcerated really shows how truly 'inclusive' Git can be. Additionally, the empowerment it offers and the gift of knowledge and skill that can't be taken away is invaluable,\" says [Candace Brydsong Williams](/company/team/#cwilliams3), manager of the Diversity, Inclusion and Belonging program at GitLab.\n\n## How TLM uses GitLab technology\n\nGitLab also provides free licenses of our top-tier hosted application for the TLM team, who use our DevOps technology in nearly every aspect of their operations.\n\nTLM transitioned from GitHub to GitLab in 2019 after we provided the licenses. Initially, GitLab was used primarily in TLM's engineering department to track all internal processes with issues and Wikis. Infrastructure as code data and internal information is stored in repositories. 
Soon, TLM adopted GitLab technology in their education and programs departments, where it is now being used for project management. TLM now uses sprint planning, milestones, issues, priority levels, burndown charts, and issues boards to streamline project management across their departments.\n\nThe Last Mile has introduced numerous new and distinct use cases for GitLab. These include:\n\n* Issues are used to manage classroom facilities including to keep track of the impacts of COVID-19 on each classroom. For example, status updates are recorded on the issue and in the comments.\n* [The Last Mile’s reentry program](https://thelastmile.org/our-work/#reentry) uses GitLab to track returned citizen onboarding and service delivery process as well as tracking internal workloads, task efforts, and collaboration across teams. To-do lists are used to manage actions and labels are used to view the status of various efforts.\n\n\"The GitLab platform provides The Last Mile with a remarkable range of solutions -- from our application of GitOps workflows for managing our hybrid infrastructure, to our org-wide application of issues across teams,\" says Mike Bowie, Director of Engineering, The Last Mile. 
\"By solving such a broad range of our needs, GitLab enables us to focus on delivering value into our programs, instead of administering and maintaining a plethora of disparate tools.\"\n",[745,4630,811,9,1090],{"slug":6285,"featured":6,"template":680},"thelastmile-gitlab","content:en-us:blog:thelastmile-gitlab.yml","Thelastmile Gitlab","en-us/blog/thelastmile-gitlab.yml","en-us/blog/thelastmile-gitlab",{"_path":6291,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6292,"content":6297,"config":6302,"_id":6304,"_type":14,"title":6305,"_source":16,"_file":6306,"_stem":6307,"_extension":19},"/en-us/blog/there-and-back-again-in-one-release",{"title":6293,"description":6294,"ogTitle":6293,"ogDescription":6294,"noIndex":6,"ogImage":1452,"ogUrl":6295,"ogSiteName":667,"ogType":668,"canonicalUrls":6295,"schema":6296},"There and back again in one release","One GitLab team-member spent 5 weeks visiting and working with 6 different colleagues in 5 cities, in 4 countries across Europe and Asia","https://about.gitlab.com/blog/there-and-back-again-in-one-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"There and back again in one release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dimitrie Hoekstra\"}],\n        \"datePublished\": \"2017-06-30\",\n      }",{"title":6293,"description":6294,"authors":6298,"heroImage":1452,"date":6299,"body":6300,"category":299,"tags":6301},[4372],"2017-06-30","\n\nInspired by [Robert][robert] and [Douwe][douwe] and their trip called [Around the world in 6 releases][6-releases], another GitLab team-member [Dimitrie][dimitrie] accepted the challenge of pursuing the \"[Travel to visit GitLab team-members][travel-policy]\" company policy by making use of the \"[visiting grant][visiting-grant]\". Visiting 6 different colleagues in 5 cities, in 4 countries across Europe and Asia he has a story to tell. 
Read on for the why, how, who and where.\n\n\u003C!-- more -->\n\n## The incentive\n\nThis year has been an amazing journey for me, with one of the highlights being the [GitLab summit][summit] in [Cancun, Mexico][cancun]. This event, at which I could bring along my \"significant other\" to the other side of the world was an amazing opportunity. Meeting people you already know online for the first time is a strange, but wonderful experience.\n\n![foto van mexico met iedereen](https://about.gitlab.com/images/8_16/pic.jpg)\n\nThis brings me to meeting [Arihant][arihant], which is one of our support engineers. We met in the back of a van, which was driving us back from [ziplining and swimming in the jungle](/images/blogimages/gitlab-mexico-summit-2017/dimitrie-hoekstra-cenote.gif). [Arihant][arihant] told me about the wonders of [India][india] and made sure I knew I was welcome if I ever thought of visiting him. Working at a fully remote company such as GitLab, where this is an actual possibility, set my mind to work...\n\nThinking about going and actually making it real for yourself, is something else. Who will I meet, where can I go? As a bonus, two of my best friends were and are still backpacking the world. Meeting them so far away from home would be awesome.\n\nBeing one of the UX designers at GitLab, I remembered that one person of the UX team couldn't make it to the [summit][summit] back in January. [Hazel][hazel], who resides in [Taipei, Taiwan][taipei], was the only one which I didn't meet in real life yet. So I reached out to see if I could visit her. 
She loved the idea!\n\nMeanwhile [Collen][collen], another support engineer living in [Kampot, Cambodia][kampot], made my eyes roll out of their sockets with pictures of [Angkor Wat][angkorwat].\n\nLastly, both [Kushal][kushal] from [Pune, India][pune] and [Jen-shin][jen-shin] from [Taipei, Taiwan][taipei] were happy to meet me as well, when I would be nearby.\n\n## Planning\n\nKnowing who I was going to visit, I had to make sure that dates and people were going to match. Not being the first one in the company to do such a trip, I could see how my colleagues had planned ahead. I expanded and built on their spreadsheet concept, until it became my master plan!\n\nPerson availability, general trip timeline and total cost estimation were the first things I created. I needed them to get my plan approved. Eventually it got upgraded with people and personal travel information to make it more useful for myself. As a bonus, there are some nifty little automation features in there as well.\n\nYou can check out a template copy of it [here][template-copy]!\n\n[Google spreadsheets](https://www.google.com/sheets/about/) allows you to assign people to certain tasks and cells. This made it very easy for people to put in their own information, while being able to see information from others. In other words, efficient team collaboration!\n\nThe result was an approved plan, where everybody was on the same page, by following [the six core values of GitLab][values]: \"Collaboration, Results, Efficiency, Diversity, Iteration, and Transparency (CREDIT)\". Thanks GitLab!\n\n## The scale of remoteness\n\n![worldmap photo](https://about.gitlab.com/images/blogimages/there-and-back-again-in-one-release/full-globe-map-people.jpg){: .vista}\n\nWith my first one-way trip tickets booked, preparing and working eating up most of my time, it was suddenly time to go!\n\nOff to [London][london] it was, for an overnight transit. GitLab, it seems, is everywhere. 
So I met up with James to make the most of it. Some pints and laughs to celebrate the beginning of this journey, cheers!\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-1\" class=\"carousel slide\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-1\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-1\" data-slide-to=\"1\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/london.jpg\" alt=\"London\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/james-cheers.jpg\" alt=\"Cheers with James\">\n    \u003C/div>\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca class=\"left carousel-control\" href=\"#carousel-example-generic-1\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-left\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-1\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan 
class=\"sr-only\">Next\u003C/span>\n  \u003C/a>\n\u003C/div>\n\n### Mumbai\n\nNext up [Mumbai, India][mumbai]. With a flight of around 9 hours, just the sheer scale of the distance we communicate over each day across the web suddenly becomes very real. After receiving a lot of help from [Arihant][arihant], I had a safe place to sleep in the busiest city I have ever seen. [Kindness][kindness] really is one of our core values.\n\n[Mumbai][mumbai] was a fascinating city, one of absolutes. A city where there is a lot of everything, good and bad. It has and still is growing at such a pace, that reality can't really keep up. There is however so much potential!\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-2\" class=\"carousel slide\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-2\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-2\" data-slide-to=\"1\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/mumbai_1.jpg\" alt=\"Mumbai highway\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/mumbai_food.jpg\" alt=\"Indian food\">\n    \u003C/div>\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca class=\"left carousel-control\" href=\"#carousel-example-generic-2\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-left\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" 
fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-2\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Next\u003C/span>\n  \u003C/a>\n\u003C/div>\n\nThe amount of people is staggering, which results in the worst traffic I have ever seen. However, as [Arihant][arihant] showed me, it has the most delicious variety of food I have ever tasted. People are always ready to help you and after some time, you know how to get around!\n\nWhen you are working remotely while traveling, you are very reliant on a decent internet connection. In [India][india] apparently, you can depend fully on mobile internet, rather than on wifi. Working together with [Arihant][arihant] has been a blast and gave some insights as to how others plan out their days.\n\nI am very thankful to have had the opportunity to be introduced to his family, even cook with them, and to experience [India][india] as he does. Thanks [Arihant][arihant]!\n\n__Fun facts:__\n- My hotel made use of a new concept called pods. It was like sleeping in a spaceship. [See for yourself][urbanpod]!\n- Driving on a scooter in [Mumbai][mumbai] is not for the faint of heart.\n- Tessa, my girlfriend back home who was very supportive of my intentions to make this trip, asked a favour of me while I was in [Mumbai][mumbai]. To try and watch Netflix together, as with traveling comes some form of a remote bonding. Soon we found out that the content library in [India][india] is not the same as back home. Why Netflix, why? 
[Google Duo][googleduo] to the rescue though, as I managed to watch an episode of \"The Americans\" through her phone on our tv back home. Speaking of perseverance. Thanks Google!\n\nIn the last days in [Mumbai][mumbai], my friends from home decided to join me. After enjoying the market and various street food it was time to travel to [Pune][pune], to meet with [Kushal][kushal].\n\n![mumbai photo](https://about.gitlab.com/images/blogimages/there-and-back-again-in-one-release/mumbai_2.jpg)\n\n### Pune\n\nIn [Mumbai][mumbai] I mostly worked from my Hotel. It had AC which is something you learn to appreciate when it's there. In [Pune][pune], [Kushal][kushal] arranged for a nice workplace at a flex workspace called [Bootstart][bootstart]. While my friends updated their blogs, me and [Kushal][kushal] collaborated on and discussed GitLab. A typical workday in [India][india].\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-3\" class=\"carousel slide\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-3\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-3\" data-slide-to=\"1\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/pune_1.jpg\" alt=\"Having dinner with Kushal and friends\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/goa_bus.jpg\" alt=\"Indian sleeper bus\">\n    \u003C/div>\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca class=\"left carousel-control\" href=\"#carousel-example-generic-3\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-left\" width=\"11\" 
height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-3\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Next\u003C/span>\n  \u003C/a>\n\u003C/div>\n\nAfter a day with more awesome Indian food, me and my friends had to catch our bus to [Goa][goa]. This seemed easy, but was in the end quite the adventure. Quickly having to move to various locations where the bus might stop, jumping in and out of auto rickshaws (Indian tuk tuks), plus [Kushal][kushal] speaking with the bus driver in yet another language, resulted in us catching our bus in the end. Thanks [Kushal][kushal]!\n\n### Goa\n\nJust 12 hours and a flat tire later, we arrived in [Goa][goa]. This was my in between mini-holiday. Mainly having a good time with my friends and converting from a digital nomad to a backpacker.\n\nThis proved to be quite the change, in a fun way. Hostels instead of hotels, and the cheaper the better. 
Our first hostel was just 100 INR, which is around 1.5 USD!\n\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-4\" class=\"carousel slide\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-4\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-4\" data-slide-to=\"1\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/goa_beach.jpg\" alt=\"On the beach with friends\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/goa_market.jpg\" alt=\"Indian nightmarkets\">\n    \u003C/div>\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca class=\"left carousel-control\" href=\"#carousel-example-generic-4\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-left\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-4\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Next\u003C/span>\n  
\u003C/a>\n\u003C/div>\n\nSwimming, clubbing and enjoying the sun. Meeting a lot of new people and driving around on scooters. Eating mango's falling right off the trees. Visiting night markets and getting all relaxed. Leaving the chaos of the big cities behind.\n\n![enjoying the sun](https://about.gitlab.com/images/blogimages/there-and-back-again-in-one-release/goa_sun.jpg){: .vista}\n\n### Siem Reap\n\nTime flies and I do too! [Siem Reap, Cambodia][siemreap] was up next, with a small transit in [Hyderabad][hyderabad] and [Singapore][singapore]. [Collen][collen] soon picked me up at the airport with a Cambodian Tuk tuk. Having met each other in Mexico, it was easy to fall into the same flow as back there. In other words, great times ahead.\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-5\" class=\"carousel slide\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"1\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"2\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/siemreap_airportcollen.jpg\" alt=\"siem reap tuk tuk photo\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/siemreap_crocodiles.jpg\" alt=\"Crocodile farm in residential neighbourhood\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/siemreap_angkorwat_tree.jpg\" alt=\"tree in Angkor Wat\">\n    \u003C/div>\n\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca 
class=\"left carousel-control\" href=\"#carousel-example-generic-5\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-left\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-5\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Next\u003C/span>\n  \u003C/a>\n\u003C/div>\n\nCynthia, Collen's significant other, had arranged a wonderful AirBnb. Pure luxury after my time in [Goa][goa]. Oh, and did I tell about our crocodile neighbours?\n\nSoon I came to know this touristy little city booming with activity, by working together at the [Angkor Hub][angkorhub], eating at various Australian food joints like [this one][sistersreycafe], and going to the highlight of the area; [Angkor Wat][angkorwat].\n\nI would be the first to admit that this place is incredible. It is another ancient civilisation's legacy of which there are massive remains in the middle of the jungle. The sheer scale is enormous and captivating. Being used myself to the Roman and Celtic ruins scattered throughout Europe, this was an eye opener. Especially the old buildings covered with trees are a sight to behold.\n\nAll of this pleasantry must come to an end of course. 
Thanks for the awesome times [Collen][collen] and Cynthia!\n\n![siem reap tuk tuk photo](https://about.gitlab.com/images/blogimages/there-and-back-again-in-one-release/siemreap_angkorwat.jpg)\n\n### Bangkok\n\nLast up my list was [Taipei, Taiwan][taipei], with a small transit through [Bangkok, Thailand][bangkok]. At the airport I met [Patai][patai], who was familiar with GitLab. With our flight being delayed, this was a nice way to kill the time and do a bit of evangelising.\n\n![evangelising](https://about.gitlab.com/images/blogimages/there-and-back-again-in-one-release/thailand_patai.jpg)\n\n### Taipei\n\nI arrived late at night and got fairly quickly to my Hostel, where I would stay the rest of my days in [Taipei][taipei]. Both the city and the [Hostel][meander] were very modern, clear and approachable. I soon came to know about the excellent subway system and again lovely food.\n\nThe next day I met up with [Hazel][hazel], which was a happy moment. Apparently, I was the first GitLab team-member she has ever met! Soon we were going about the city, sightseeing temples and local markets. 
After working together we even did some ice skating, which I love to do!\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-6\" class=\"carousel slide\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-6\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-6\" data-slide-to=\"1\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-6\" data-slide-to=\"2\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/taipei_working.jpg\" alt=\"taipei working together\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/taipei_happy.jpg\" alt=\"Me and Hazel meet\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/there-and-back-again-in-one-release/taipei_hazel_and_jenshin.jpg\" alt=\"Hazel and Jen-shin meet\">\n    \u003C/div>\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca class=\"left carousel-control\" href=\"#carousel-example-generic-6\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-left\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-6\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 
11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Next\u003C/span>\n  \u003C/a>\n\u003C/div>\n\n[Jen-shin][jen-shin] lives in the same city, another opportunity! As I was not feeling too well on that day, we saw some of the more controversial structures in the city. The original plan was to go to one of the many waterfalls around there. A good reason to return one day.\n\nOn the last day we all managed to meet up, making it so, that [Hazel][hazel] and [Jen-shin][jen-shin] finally met each other as well. In other words, interconnecting GitLab. Thanks [Hazel][hazel] and [Jen-shin][jen-shin], for this awesome time together.\n\n## Going back again\n\nMy way back, was a bit rough, with a transit time of 16 hours in [Shanghai, China][shanghai]. It is strange how different the internet feels without services like Google, Facebook and so on, because of [the great firewall of China][greatfirewall]. I can only say that, this was by far the worst online experience I have had throughout my trip. Time to step up [China][china], openness is the answer!\n\nArriving back in the [Netherlands][netherlands], I quickly came to appreciate things that before went by unnoticed. This trip has opened up new insights into how things are and could be. Differences in culture and the way people live and work is what breathes character and what enriches the world. I hope we all interconnect even more, to see what great things are yet to be.\n\n![worldmap photo](https://about.gitlab.com/images/blogimages/there-and-back-again-in-one-release/full-globe-map-trip.jpg){: .vista}\n\nLooking at the complete journey I have made, it becomes clear that no distance is too big in order to connect, work together and have fun. 
It has been a life enriching experience for which I am happy and thankful to have had the opportunity. Thanks GitLab and all the people that I could visit and meet, it was an absolute pleasure to [get to know each other][gettoknow].\n\n## Sharing experience\n\nA trip is not complete without finding what works and what doesn't. I want to see others succeed in working abroad as well, therefore I created a separate section in the [GitLab handbook][handbook-workingabroad] with tips and tools that I found most helpful on such journeys.\n\nDo you love the GitLab way of working? [Join our team](/jobs/)!\n\n",[9,832],{"slug":6303,"featured":6,"template":680},"there-and-back-again-in-one-release","content:en-us:blog:there-and-back-again-in-one-release.yml","There And Back Again In One Release","en-us/blog/there-and-back-again-in-one-release.yml","en-us/blog/there-and-back-again-in-one-release",{"_path":6309,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6310,"content":6316,"config":6322,"_id":6324,"_type":14,"title":6325,"_source":16,"_file":6326,"_stem":6327,"_extension":19},"/en-us/blog/this-sre-attempted-to-roll-out-an-haproxy-change",{"title":6311,"description":6312,"ogTitle":6311,"ogDescription":6312,"noIndex":6,"ogImage":6313,"ogUrl":6314,"ogSiteName":667,"ogType":668,"canonicalUrls":6314,"schema":6315},"This SRE attempted to roll out an HAProxy config change. You won't believe what happened next...","This post is about a wild discovery made while investigating strange behavior from HAProxy. 
We dive into the pathology, describe how we found it, and share some investigative techniques used along the way.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681844/Blog/Hero%20Images/infra-proxy-protocol-wireshark-header.png","https://about.gitlab.com/blog/this-sre-attempted-to-roll-out-an-haproxy-change","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"This SRE attempted to roll out an HAProxy config change. You won't believe what happened next... \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Igor Wiedler\"}],\n        \"datePublished\": \"2021-01-14\",\n      }",{"title":6311,"description":6312,"authors":6317,"heroImage":6313,"date":6319,"body":6320,"category":743,"tags":6321},[6318],"Igor Wiedler","2021-01-14","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-02-12.\n{: .note .alert-info .text-center}\n\n## TL;DR\n\n- HAProxy has a `server-state-file` directive that persists some of its state across restarts.\n- This state file contains the port of each backend server.\n- If an `haproxy.cfg` change modifies the port, the new port will be overwritten with the previous one from the state file.\n- A workaround is to change the backend server name, so that it is considered to be a separate server that does not match what is in the state file.\n- This has implications for the rollout procedure we use on HAProxy.\n\n## Background\n\nAll of this occurred in the context of [the gitlab-pages PROXYv2\nproject](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/11902).\n\nThe rollout to staging involves changing the request flow from TCP proxying...\n```\n                   443                   443                        1443\n[ client ] -> [ google lb ] -> [ fe-pages-01-lb-gstg ] -> [ web-pages-01-sv-gstg ]\n      tcp,tls,http         tcp                        tcp            
tcp,tls,http\n```\n\n... to using the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt):\n```\n                   443                   443                        2443\n[ client ] -> [ google lb ] -> [ fe-pages-01-lb-gstg ] -> [ web-pages-01-sv-gstg ]\n      tcp,tls,http         tcp                     proxyv2,tcp       proxyv2,tcp,tls,http\n```\n\nThis is done through this change to `/etc/haproxy/haproxy.cfg` on\n`fe-pages-01-lb-gstg` (note the port change):\n```diff\n-    server web-pages-01-sv-gstg web-pages-01-sv-gstg.c.gitlab-staging-1.internal:1443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080\n-    server web-pages-02-sv-gstg web-pages-02-sv-gstg.c.gitlab-staging-1.internal:1443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080\n+    server web-pages-01-sv-gstg web-pages-01-sv-gstg.c.gitlab-staging-1.internal:2443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080 send-proxy-v2\n+    server web-pages-02-sv-gstg web-pages-02-sv-gstg.c.gitlab-staging-1.internal:2443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080 send-proxy-v2\n```\n\nSeems straightforward enough, let's go ahead and apply that change.\n\n## The brokenness\n\nAfter applying this change on one of the two `fe-pages` nodes, the requests to\nthat node start failing.\n\nBy retrying a few times via `curl` on the command line, we see this error:\n```\n➜  ~ curl -vvv https://jarv.staging.gitlab.io/pages-test/\n*   Trying 35.229.69.78...\n* TCP_NODELAY set\n* Connected to jarv.staging.gitlab.io (35.229.69.78) port 443 (#0)\n* ALPN, offering h2\n* ALPN, offering http/1.1\n* successfully set certificate verify locations:\n*   CAfile: /etc/ssl/cert.pem\n  CApath: none\n* TLSv1.2 (OUT), TLS handshake, Client hello (1):\n* LibreSSL SSL_connect: SSL_ERROR_SYSCALL in connection to jarv.staging.gitlab.io:443\n* Closing connection 0\ncurl: (35) LibreSSL SSL_connect: SSL_ERROR_SYSCALL in connection to jarv.staging.gitlab.io:443\n```\n\nThis looks 
like some issue in the TLS stack, or possibly with the underlying\nconnection. It turns out that `LibreSSL` does not give us much insight into the\nunderlying issue here.\n\nSo to get a better idea, let's capture a traffic dump on the HAProxy node:\n```\nsudo tcpdump -v -w \"$(pwd)/$(hostname).$(date +%Y%m%d_%H%M%S).pcap\"\n```\n\nWhile `tcpdump` is running, we can generate some traffic, then ctrl+c and pull\nthe dump down for further analysis. That `pcap` file can be opened in Wireshark, and this allows the data to be\nexplored and filtered interactively. Here, the first really surprising thing happens:\n\n**We do not see any traffic on port 2443.**\n\nAt the same time, we _do_ see some traffic on port 1443. But we came here to look at what underlies the LibreSSL error, and what we find\nis the following (by filtering for `ip.addr == \u003Cmy external ip>`). We have a TCP SYN/ACK, establishing the connection. Followed by the client\nsending a TLS \"hello\". After which the server closes the connection with a FIN.\n\nIn other words, the server is closing the connection on the client.\n\n## The early hypotheses\n\nSo here come the usual suspects:\n\n* Did we modify the correct place in the config file?\n* Did we catch all places we need to update in the config?\n* Did the HAProxy process parse th econfig successfully?\n* Did HAProxy actually reload?\n* Is there a difference between reload and restart?\n* Did we modify the correct config file?\n* Are there old lingering HAProxy processes on the box?\n* Are we actually sending traffic to this node?\n* Are backend health checks failing?\n* Is there anything in the HAProxy logs?\n\nNone of these gave any insights whatsoever.\n\nIn an effort to reproduce the issue, I ran HAProxy on my local machine with a\nsimilar config, proxying traffic to `web-pages-01-sv-gstg`. To my surprise, this\nworked correctly. I tested with different HAProxy versions. It worked locally, but not on\n`fe-pages-01`.\n\nAt this point I'm stumped. 
The local config is not identical to gstg, but quite\nsimilar. What could possibly be the difference?\n\n## Digging deeper\n\nThis is when I reached out to [Matt Smiley](/company/team#/msmiley) to help with the investigation.\n\nWe started off by repeating the experiment. We saw the same results:\n\n* Server closes connection after client sends TLS hello\n* No traffic from fe-pages to web-pages on port 2443\n* Traffic from fe-pages to web-pages on port 1443\n\nThe first lead was to look at the packets going to port 1443. What do they\ncontain? We see this:\n\n![Traffic capture in wireshark showing a TCP FIN and the string QUIT in the stream](https://about.gitlab.com/images/blogimages/infra-proxy-protocol-wireshark.png){: .shadow.center}\nTraffic capture in Wireshark showing a TCP FIN and the string QUIT in the stream\n{: .note.text-center}\n\nThere is mention of `jarv.staging.gitlab.io` which does match what the client sent. And before that there is some really weird preamble:\n\n```\n\"\\r\\n\\r\\n\\0\\r\\nQUIT\\n\"\n```\n\nWhat on earth is this? Is it from the PROXY protocol? Let's search [the\nspec](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) for the word\n\"QUIT.\" Nothing.\n\nIs this something in the HAProxy source? Searching for \"QUIT\" in the code\nreveals some hits, but none that explain this.\n\nSo this is a mystery. We leave it for now, and probe in a different direction.\n\n## Honing in\n\nHow come we are sending traffic to port 1443, when that port is not mentioned in\n`haproxy.cfg`? Where on earth is HAProxy getting that information from?\n\nI suggested running `strace` on HAProxy startup, so that we can see which files\nare being `open`ed. 
This is a bit tricky to do though, because the process is\nsystemd-managed.\n\nIt turns out that thanks to BPF and [BCC](https://github.com/iovisor/bcc), we\ncan actually listen on open events system-wide using the wonderful\n[opensnoop](https://github.com/iovisor/bcc/blob/master/tools/opensnoop.py). So we run `opensnoop` and restart `haproxy`, and this is what we see, highlighting the relevant bit:\n```\niwiedler@fe-pages-01-lb-gstg.c.gitlab-staging-1.internal:~$ sudo /usr/share/bcc/tools/opensnoop  -T --name haproxy\n\n...\n\n24.117171000  16702  haproxy             3   0 /etc/haproxy/haproxy.cfg\n...\n24.118099000  16702  haproxy             4   0 /etc/haproxy/errors/400.http\n...\n24.118333000  16702  haproxy             4   0 /etc/haproxy/cloudflare_ips_v4.lst\n...\n24.119109000  16702  haproxy             3   0 /etc/haproxy/state/global\n```\n\nWhat do we have here? `/etc/haproxy/state/global`, this seems oddly suspicious.\nWhat could it possibly be? Let's see what this file contains.\n```\niwiedler@fe-pages-01-lb-gstg.c.gitlab-staging-1.internal:~$ sudo cat /etc/haproxy/state/global\n\n1\n# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord\n5 pages_http 1 web-pages-01-sv-gstg 10.224.26.2 2 0 1 1 21134 15 3 4 6 0 0 0 web-pages-01-sv-gstg.c.gitlab-staging-1.internal 1080 -\n5 pages_http 2 web-pages-02-sv-gstg 10.224.26.3 2 0 1 1 20994 15 3 4 6 0 0 0 web-pages-02-sv-gstg.c.gitlab-staging-1.internal 1080 -\n6 pages_https 1 web-pages-01-sv-gstg 10.224.26.2 2 0 1 1 21134 15 3 4 6 0 0 0 web-pages-01-sv-gstg.c.gitlab-staging-1.internal 1443 -\n6 pages_https 2 web-pages-02-sv-gstg 10.224.26.3 2 0 1 1 20994 15 3 4 6 0 0 0 web-pages-02-sv-gstg.c.gitlab-staging-1.internal 1443 -\n```\n\nIt appears we are storing some metadata for each backend server, including 
its old port number!\n\nNow, looking again in `haproxy.cfg`, we see:\n```\nglobal\n    ...\n    server-state-file /etc/haproxy/state/global\n```\n\nSo we are using the\n[`server-state-file`](https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#server-state-file)\ndirective. This will persist server state across HAProxy restarts. That is\nuseful to keep metadata consistent, such as whether a server was marked as\nMAINT.\n\n**However, it appears to be clobbering the port from `haproxy.cfg`!**\n\nThe suspected behavior is:\n\n* HAProxy is running with the old config: `web-pages-01-sv-gstg`, `1443`\n* `haproxy.cfg` is updated with the new config: `web-pages-01-sv-gstg`, `2443`, `send-proxy-v2`\n* HAProxy reload is initiated\n* HAProxy writes out the state to `/etc/haproxy/state/global` (including the old port of each backend server)\n* HAProxy starts up, reads `haproxy.cfg`, initializes itself with the new config: `web-pages-01-sv-gstg`, `2443`, `send-proxy-v2`\n* HAProxy reads the state from `/etc/haproxy/state/global`, matches on the backend server `web-pages-01-sv-gstg`, and overrides all values, including the port!\n\nThe result is that we are now attempting to send PROXYv2 traffic to the TLS port.\n\n## The workaround\n\nTo validate the theory and develop a potential workaround, we modify\n`haproxy.cfg` to use a different backend server name.\n\nThe new diff is:\n```diff\n-    server web-pages-01-sv-gstg         web-pages-01-sv-gstg.c.gitlab-staging-1.internal:1443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080\n-    server web-pages-02-sv-gstg         web-pages-02-sv-gstg.c.gitlab-staging-1.internal:1443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080\n+    server web-pages-01-sv-gstg-proxyv2 web-pages-01-sv-gstg.c.gitlab-staging-1.internal:2443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080 send-proxy-v2\n+    server web-pages-02-sv-gstg-proxyv2 web-pages-02-sv-gstg.c.gitlab-staging-1.internal:2443 check inter 3s 
fastinter 1s downinter 5s fall 3 port 1080 send-proxy-v2\n```\n\nWith this config change in place, we reload HAProxy and indeed, it is now\nserving traffic correctly. See [the merge request fixing it](https://gitlab.com/gitlab-cookbooks/gitlab-haproxy/-/merge_requests/261).\n\n## A follow-up on those `QUIT` bytes\n\nNow, what is up with that `QUIT` message? Is it part of the PROXY protocol? Remember, searching [the\nspec](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) for that\nstring did not find any matches. However, Matt actually read the spec, and found this section on version 2 of\nthe protocol:\n```\nThe binary header format starts with a constant 12 bytes block containing the\nprotocol signature :\n\n   \\x0D \\x0A \\x0D \\x0A \\x00 \\x0D \\x0A \\x51 \\x55 \\x49 \\x54 \\x0A\n```\n\nThose are indeed the bytes that make up \"\\r\\n\\r\\n\\0\\r\\nQUIT\\n\". Slightly less mnemonic than the header from text-based version 1 of the protocol:\n```\n- a string identifying the protocol : \"PROXY\" ( \\x50 \\x52 \\x4F \\x58 \\x59 )\n  Seeing this string indicates that this is version 1 of the protocol.\n```\n\nWell, I suppose that explains it.\n\nI believe our work here is done. 
Don't forget to like and subscribe!\n",[2396,9],{"slug":6323,"featured":6,"template":680},"this-sre-attempted-to-roll-out-an-haproxy-change","content:en-us:blog:this-sre-attempted-to-roll-out-an-haproxy-change.yml","This Sre Attempted To Roll Out An Haproxy Change","en-us/blog/this-sre-attempted-to-roll-out-an-haproxy-change.yml","en-us/blog/this-sre-attempted-to-roll-out-an-haproxy-change",{"_path":6329,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6330,"content":6336,"config":6342,"_id":6344,"_type":14,"title":6345,"_source":16,"_file":6346,"_stem":6347,"_extension":19},"/en-us/blog/three-new-support-tools",{"title":6331,"description":6332,"ogTitle":6331,"ogDescription":6332,"noIndex":6,"ogImage":6333,"ogUrl":6334,"ogSiteName":667,"ogType":668,"canonicalUrls":6334,"schema":6335},"We've open sourced 3 tools to help troubleshoot system performance","Say hello to the open source tools our Support team is using to better summarize customer performance data – and find out how they can help you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670405/Blog/Hero%20Images/open_source_tools.jpg","https://about.gitlab.com/blog/three-new-support-tools","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We've open sourced 3 tools to help troubleshoot system performance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Will Chandler\"},{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-24\",\n      }",{"title":6331,"description":6332,"authors":6337,"heroImage":6333,"date":6339,"body":6340,"category":743,"tags":6341},[6338,672],"Will Chandler","2019-07-24","\nOur self-managed customers often encounter issues related to performance, or the time it takes to execute something. 
In the past, the [Support team](/handbook/support/) had to pull data from disparate sources and cobble it together in order to analyze performance-related issues.\n\n“We’re dealing with someone else’s computer on support, so we have to be able to handle environments with limited observability,” says [Will Chandler](/company/team/#wchandler), senior support engineer. “We’re at the mercy of their infrastructure. That’s why the team has made tools to reduce the friction.”\n\n“With [GitLab.com](/pricing/), we have all of this fancy tooling that helps us collect performance data,” says [Lee Matos](/company/team/#leematos), support engineering manager. “But when we’re working with customers, we need to be ready to bring lightweight tools that don’t require a lot of setup that we can use based on what they have in place.”\n\nThe Support team is working on becoming more data driven by using three new tools designed to aggregate and summarize performance data for self-managed customers. A focus on data-driven decision-making improves the customer relationship and demonstrates our commitment to making performance a key feature of GitLab.\n\nWe'll look at three open source tools created by GitLab Self-Managed Support. Strace parser is a general tool that could be of use to anyone, while JSON Stats and GitLabSOS are tailored to GitLab, but could be easily modified.\n\n## 1. [Strace parser](https://gitlab.com/gitlab-com/support/toolbox/strace-parser)\n\n[Strace](https://gitlab.com/strace/strace) is a commonly used debugging and diagnostic tool in Linux that captures information about what’s happening inside processes running on our customers’ environments.\n\nUnlike [newer](http://man7.org/linux/man-pages/man1/perf.1.html) and [more powerful](https://github.com/iovisor/bpftrace) tracing tools, strace adds [significant overhead to a process](http://www.brendangregg.com/blog/2014-05-11/strace-wow-much-syscall.html). 
However, strace is generally available even on very old versions of Linux.\n\nAn strace of a single-threaded program is linear, but following the threads of execution quickly gets difficult when there are many processes being captured. At GitLab Support we are typically tracing [Unicorn](https://bogomips.org/unicorn/) workers or [Gitaly](https://gitlab.com/gitlab-org/gitaly), which are highly concurrent, resulting in hundreds of process IDs being traced and hundreds of thousands of lines of output from traces only a few seconds long.\n\nWill built [strace parser](https://gitlab.com/gitlab-com/support/toolbox/strace-parser) for these types of use cases. Strace parser summarizes the most meaningful processing data delivered by an strace in a more accessible format, allowing users to find the critical section sections of the data quickly.\n\nThe next two examples are from a GitLab customer that was using a very slow file system to host their .gitconfig file, which was a major performance bottleneck. But it was not immediately clear what was happening from the perspective of a user trying to troubleshoot. By running an strace on Gitaly, we were able to get a better understanding of why the system was so slow.\n\n```\n3694  13:45:06.207369 clock_gettime(CLOCK_MONOTONIC, {3016230, 201254200}) = 0 \u003C0.000015>\n3694  13:45:06.207409 futex(0x7f645bb49664, FUTEX_WAIT_BITSET_PRIVATE, 192398, {3016230, 299906871}, ffffffff \u003Cunfinished ...>\n3542  13:45:06.209616 \u003C... futex resumed> ) = -1 ETIMEDOUT (Connection timed out) \u003C0.005236>\n3542  13:45:06.209639 futex(0x1084ff0, FUTEX_WAKE, 1) = 1 \u003C0.000023>\n3510  13:45:06.209673 \u003C... futex resumed> ) = 0 \u003C0.002909>\n3542  13:45:06.209701 futex(0xc420896548, FUTEX_WAKE, 1 \u003Cunfinished ...>\n3510  13:45:06.209710 pselect6(0, NULL, NULL, NULL, {0, 20000}, NULL \u003Cunfinished ...>\n16780 13:45:06.209740 \u003C... futex resumed> ) = 0 \u003C0.002984>\n3542  13:45:06.209749 \u003C... 
futex resumed> ) = 1 \u003C0.000043>\n16780 13:45:06.209776 pselect6(0, NULL, NULL, NULL, {0, 3000}, NULL \u003Cunfinished ...>\n3542  13:45:06.209787 futex(0xc420053548, FUTEX_WAKE, 1 \u003Cunfinished ...>\n16780 13:45:06.209839 \u003C... pselect6 resumed> ) = 0 (Timeout) \u003C0.000056>\n3544  13:45:06.209853 \u003C... futex resumed> ) = 0 \u003C0.003148>\n3542  13:45:06.209861 \u003C... futex resumed> ) = 1 \u003C0.000069>\n3510  13:45:06.209868 \u003C... pselect6 resumed> ) = 0 (Timeout) \u003C0.000151>\n3544  13:45:06.209915 epoll_ctl(4\u003Canon_inode:[eventpoll]>, EPOLL_CTL_DEL, 181\u003CUNIX:[164869291]>, 0xc42105bb14 \u003Cunfinished ...>\n16780 13:45:06.210076 write(1\u003Cpipe:[55447]>, \"time=\\\"2019-02-14T18:45:06Z\\\" level=warning msg=\\\"health check failed\\\" error=\\\"rpc error: code = DeadlineExceeded desc = context deadline exceeded\\\" worker.name=gitaly-ruby.4\\n\", 170 \u003Cunfinished ...>\n3544  13:45:06.210093 \u003C... epoll_ctl resumed> ) = 0 \u003C0.000053>\n3542  13:45:06.210101 futex(0x1089020, FUTEX_WAIT, 0, {0, 480025102} \u003Cunfinished ...>\n3510  13:45:06.210109 pselect6(0, NULL, NULL, NULL, {0, 20000}, NULL \u003Cunfinished ...>\n16780 13:45:06.210153 \u003C... 
write resumed> ) = 170 \u003C0.000064>\n3544  13:45:06.210163 close(181\u003CUNIX:[164869291]> \u003Cunfinished ...>\n```\n\nThis strace delivers more than 300,000 lines about the different Gitaly processes running on this customer’s GitLab environment, making it challenging to decipher the flow of execution.\n{: .note.text-center}\n\n“In this case, we can use strace-parser to say, ‘Just give me all the files that were opened, and sort them by how long it took to open,’” says Will.\n\n```\n$ strace-parser trace.txt files --sort duration\n\nFiles Opened\n\n      pid      dur (ms)       timestamp            error         file name\n  -------    ----------    ---------------    ---------------    ---------\n    24670      5203.999    13:45:16.152985           -           /efs/gitlab/home/.gitconfig\n    24859      5296.580    13:45:23.367482           -           /efs/gitlab/home/.gitconfig\n    24584      5279.810    13:45:09.286019           -           /efs/gitlab/home/.gitconfig\n    24666      5276.975    13:45:16.079697           -           /efs/gitlab/home/.gitconfig\n    24667      5255.649    13:45:16.101009           -           /efs/gitlab/home/.gitconfig\n    14871      2594.364    13:45:18.762347           -           /efs/gitlab/home/.gitconfig\n    24885      2440.635    13:45:26.224189           -           /efs/gitlab/home/.gitconfig\n    24886      2432.980    13:45:26.231009           -           /efs/gitlab/home/.gitconfig\n    24656        55.873    13:45:15.916836        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n    24688        42.764    13:45:21.522789        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n     3709        39.631    13:45:07.816618           -           /efs/gitlab/home/.gitconfig\n    24583        37.959    13:45:09.218283           -           /efs/gitlab/home/.gitconfig\n```\n\nBy summarizing the data in this way, we see multiple 
files that took 2-5 seconds to open, which is several orders of magnitude slower than expected.\n{: .note.text-center}\n\n“If it’s a particularly busy server and we’re performing these actions 50 times a second, 100 times a second, that adds up really fast,” says Will. “Strace-Parser lets you drill down quickly, and say, ‘OK, this specific thing we’re doing is super slow.’”\n\n### Get a closer look at processes using strace-parser\n\nStrace-Parser can also be used to drill down into details of a process.\n\nThe previous output showed PID 24670 is one of the slower processes, so we use the parser to understand how this slow call impacted the performance of the process overall.\n\n```\n$ strace-parser trace.txt pid 24670\n\nPID 24670\n\n  271 syscalls, active time: 5303.438ms, user time: 34.662ms, total time: 5338.100ms\n  start time: 13:45:16.116671    end time: 13:45:21.454771\n\n  syscall                 count    total (ms)      max (ms)      avg (ms)      min (ms)    errors\n  -----------------    --------    ----------    ----------    ----------    ----------    --------\n  open                       29      5223.073      5203.999       180.106         0.031    ENOENT: 9\n  read                       25        46.303        28.747         1.852         0.031\n  access                     11         6.948         4.131         0.632         0.056    ENOENT: 3\n  lstat                       6         5.116         2.130         0.853         0.077    ENOENT: 4\n  mmap                       32         3.868         0.485         0.121         0.028\n  openat                      2         3.757         2.934         1.878         0.823\n  fstat                      28         3.395         0.272         0.121         0.033\n  munmap                     11         2.551         0.929         0.232         0.056\n  rt_sigaction               59         2.548         0.121         0.043         0.024\n  close                      22         2.375         0.279        
 0.108         0.032\n  mprotect                   14         0.927         0.174         0.066         0.032\n  execve                      1         0.621         0.621         0.621         0.621\n  brk                         6         0.595         0.210         0.099         0.046\n  stat                        8         0.388         0.082         0.048         0.027    ENOENT: 3\n  getdents                    4         0.361         0.138         0.090         0.044\n  rt_sigprocmask              3         0.141         0.059         0.047         0.040\n  write                       1         0.101         0.101         0.101         0.101\n  dup2                        3         0.090         0.032         0.030         0.026\n  arch_prctl                  1         0.077         0.077         0.077         0.077\n  getrlimit                   1         0.062         0.062         0.062         0.062\n  getcwd                      1         0.044         0.044         0.044         0.044\n  set_robust_list             1         0.035         0.035         0.035         0.035\n  set_tid_address             1         0.032         0.032         0.032         0.032\n  setpgid                     1         0.030         0.030         0.030         0.030\n  ---------------\n\n  Program Executed: /opt/gitlab/embedded/bin/git\n  Args: [\"--git-dir\" \"/nfs/gitlab/gitdata/repositories/group/project.git\" \"cat-file\" \"--batch-check\"]\n\n  Parent PID:  3563\n\n  Slowest file open times for PID 24670:\n\n    dur (ms)       timestamp            error         file name\n  ----------    ---------------    ---------------    ---------\n    5203.999    13:45:16.152985           -           /efs/gitlab/home/.gitconfig\n       5.420    13:45:16.143520           -           /nfs/gitlab/gitdata/repositories/group/project.git/config\n       2.959    13:45:21.372776           -           /efs/gitlab/home/.gitconfig\n       2.934    13:45:21.401073           -           
/nfs/gitlab/gitdata/repositories/group/project.git/refs/\n       2.736    13:45:21.417333        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/info/grafts\n       2.683    13:45:21.421558           -           /nfs/gitlab/gitdata/repositories/group/project.git/objects/b7/ef5eba3a425af1e2a9cf6f51cb87454b6e1ad1\n       2.430    13:45:21.407170        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n       0.992    13:45:21.420213        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/shallow\n       0.823    13:45:21.405535           -           /nfs/gitlab/gitdata/repositories/group/project.git/objects/pack\n       0.275    13:45:21.380382           -           /nfs/gitlab/gitdata/repositories/group/project.git/config\n```\n\nThe output shows the time this process spent working was dominated by the slow file open. This data points the Support team in the right direction for fixing the underlying issue.\n{: .note.text-center}\n\nStrace itself has the `-c` flag which provides a similar summary, but its utility is limited when multiple processes are traced as it cannot break out per-process statistics.  Strace-Parser breaks these down to the PID level, and can also include the details of parent and child processes on demand.\n\n“In this case Will has identified an interesting area for our customer and then very quickly anchored it in the fact that when we look at that one spot it was slow,” says Lee. “When we’re debugging, having this data available really helps us pinpoint the problem for our customers so we can give them answers.”\n\nThe typical GitLab deployment has many different processes and services running at a time, which can create dozens of different child processes, so there is a large surface area for potential errors or slowness to occur.\n\nStrace-Parser is an open source, generic tool that anyone can use to better understand their strace data.\n\n## 2. 
[JSON Stats](https://gitlab.com/gitlab-com/support/toolbox/json_stats)\n\nWill also built [JSON Stats](https://gitlab.com/gitlab-com/support/toolbox/json_stats), a script that pulls performance statistics for different logs from the customer’s GitLab environment and summarizes the results in an easy-to-interpret table.\n\n```\nMETHOD                             COUNT     RPS     PERC99     PERC95     MEDIAN         MAX        MIN          SCORE    % FAIL\nFetchRemote                         2542    0.17  962176.08  130154.88   36580.23  4988513.00    1940.45  2445851585.19      1.06\nFindAllTags                         5200    0.34   30000.37   11538.63    1941.84    30006.23     252.10   156001924.68      1.63\nFindCommit                          3506    0.23   20859.98   16622.78   10841.86    30001.59    2528.67    73135073.75      0.23\nFindAllRemoteBranches               1664    0.11   20432.93   12996.75    8606.60   405503.94    1430.84    34000396.10      0.00\nAddRemote                           2603    0.17   10001.03    8094.97     825.46    10007.46     228.13    26032673.70      3.00\nFindLocalBranches                   2535    0.16   10004.68   10002.90    9051.91    10036.16    1260.89    25361871.05     34.32\n```\n\nThis output shows that we’re calling the “FindLocalBranches” service 2500+ times, and it’s failing 34% of the time.\n{: .note.text-center}\n\nThe Support team can use JSON Stats to ground their findings in evidence when evaluating overall performance for a customer. It's the same concept as strace-parser. Can we pivot the information in a way that it clearly becomes meaningful data?\n\n“It’s a quick way of extracting data that you can give to a customer. 
Instead of saying ‘Look, this failed once,’ we can say, ‘Look, this is failing a third of the time and that suggests there’s a problem with X,’” says Will.\n\nIn the sample output we see that JSON Stats is working with Gitaly logs, but the tool is nimble enough to work on the logs from all the heavy components of GitLab, including Rails, which runs the UI, and Sidekiq, which works on background tasks.\n\n“Some of our customers are very sophisticated and may have advanced monitoring that could give us this information. But we wanted to build a tool that would help us align and easily standardize on how we can get this performance information for customers that don’t have an advanced monitoring setup,” says Lee.\n\nWhile this specific tool isn't as helpful for people outside of the GitLab community, hopefully it helps to inspire others to consider how they are drawing conclusions, and how they can speed that process up.\n\n### Benchmarking with JSON Stats\n\nWill is building a future iteration of JSON Stats that will compare the performance of a customer’s GitLab instance with GitLab.com.\n\n![JSON benchmarking table](https://about.gitlab.com/images/blogimages/support-tools-update.png){: .shadow}\n\nBenchmarking the performance of GitLab.com (the first row) with the customer environment (second row), and the ratio between the two (third row). We can see that in the worst case, the customer’s 99th percentile FindCommit latency was almost eight times slower than it was on GitLab.com.\n{: .note.text-center}\n\n“Our vision here is to give accountability to our customers. We’re going to treat GitLab.com as the pinnacle experience for GitLab,” says Lee. “We want to use JSON Stats with benchmarking to help us understand how far away our customers are from GitLab.com.”\n\nLee and Will are still assessing how to set the target range for the customer’s instance of GitLab. 
But considering the wealth of resources allocated to GitLab.com, any self-managed customer that is performing within 5-10% of GitLab.com would be considered hugely successful.\n\n## 3. [GitLab SOS](https://gitlab.com/gitlab-com/support/toolbox/gitlabsos)\n\nWhen a customer encounters an issue, but they are unsure of what they problem is, they can run [GitLab SOS](https://gitlab.com/gitlab-com/support/toolbox/gitlabsos), created by support engineer [Cody West](/company/team/#codyww), to create a snapshot of different activities happening on their system. It's been so helpful in debugging GitLab that it's being added into our [Omnibus delivery](https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests/3430).\n\nBy capturing so much data about a moment in time during or shortly after encountering a problem, the support team is able to work asynchronously to troubleshoot on behalf of the customer.\n\n```\ncpuinfo              getenforce           iotop                netstat              opt                  sestatus             unicorn_stats\ndf_h                 gitlab_status        lscpu                netstat_i            pidstat              systemctl_unit_files uptime\ndmesg                gitlabsos.log        meminfo              nfsiostat            ps                   tainted              var\netc                  hostname             mount                nfsstat              sar_dev              ulimit               vmstat\nfree_m               iostat               mpstat               ntpq                 sar_tcp              uname\n```\n\nGitLab SOS works best if the script is run while an issue is occurring, or moments after, but even if the window of opportunity is missed you can still successfully gather information to diagnose the problem.\n{: .note.text-center}\n\n“If a customer is sharp, they may know what problems to look for already,” says Lee. 
“But if a customer is scared and they don’t know what to look for, then they can lean on a tool like GitLab SOS and learn from GitLab SOS. We even have some sharp customers that will generate the SOS output and begin to troubleshoot themselves because of the comprehensive overview it provides.”\n\n## These new tools drive data-driven decision-making in Support\n\nTools like strace-parser, JSON Stats, and GitLab SOS provide the Support team and GitLab customers with critical evidence about performance. By letting the data drive decision-making, the Support team is able to identify problems faster and quickly start debugging customer environments. Performance is a key feature of GitLab, and by filling our toolbox with data-driven solutions we can ensure greater [transparency](https://handbook.gitlab.com/handbook/values/#transparency) between GitLab and our customers.\n\nLearn more about debugging from a support engineering perspective in a GitLab Unfiltered video.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9W6QnpYewik\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nCover photo by [Diogo Nunes](https://unsplash.com/@dialex?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/tools?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[745,677,9],{"slug":6343,"featured":6,"template":680},"three-new-support-tools","content:en-us:blog:three-new-support-tools.yml","Three New Support 
Tools","en-us/blog/three-new-support-tools.yml","en-us/blog/three-new-support-tools",{"_path":6349,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6350,"content":6355,"config":6360,"_id":6362,"_type":14,"title":6363,"_source":16,"_file":6364,"_stem":6365,"_extension":19},"/en-us/blog/tips-for-mastering-video-calls",{"title":6351,"description":6352,"ogTitle":6351,"ogDescription":6352,"noIndex":6,"ogImage":1669,"ogUrl":6353,"ogSiteName":667,"ogType":668,"canonicalUrls":6353,"schema":6354},"5 Tips for mastering video calls","All-remote work wouldn't be possible without communication tools like video conferencing. Here are a few tips we use at GitLab.","https://about.gitlab.com/blog/tips-for-mastering-video-calls","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Tips for mastering video calls\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Betsy Church\"}],\n        \"datePublished\": \"2019-08-05\",\n      }",{"title":6351,"description":6352,"authors":6356,"heroImage":1669,"date":6357,"body":6358,"category":808,"tags":6359},[1267],"2019-08-05","\nAs remote and distributed work becomes more popular around the world, technology is\nconstantly evolving to support it. Communication tools, particularly video conferencing, allow\npeople to [connect and collaborate from anywhere with an internet connection](/blog/how-remote-work-at-gitlab-enables-location-independence/).\n\nFor an [all-remote](/company/culture/all-remote/), global company like\nGitLab, video calls are even more crucial to how\nwe [communicate](/handbook/communication), get work done as a team,\nand get to know each other.\n\nWhile best practices for video calls may seem obvious to the experienced remote\nprofessional, they often don’t come naturally to someone who’s used to working in a\ntraditional office setting. 
Here are some of the tips and tricks we use at\nGitLab to help you master the art of a successful video call.\n\n## 1. Know when a call is necessary\n\nFirst things first: Do you even need to have a video call? We’ve all had those\nwork weeks that are overloaded with calls or meetings, when oftentimes the\ntopic could have been discussed asynchronously in an email, Google Doc, or even a GitLab issue.\n\nWe default to [asynchronous communication](/handbook/communication/) at\nGitLab for many reasons. For one, it means there is far more documentation of your project\nand the work being done. On a global team, asynchronous communication allows for progress to continue even after\none person’s working day ends. Asynchronous work is also naturally more inclusive\nbecause [everyone can contribute](/company/mission/#mission).\n\nBut that doesn’t mean it works for every conversation. At GitLab, our rule of thumb is\nthat if you go back and forth about a topic three times, it’s time for a video call to\ntalk it out in real time.\n\n## 2. Use the right equipment correctly\n\nThe headphones and equipment you use can make a big difference in a successful video call,\nbut only if you use them the right way.\n\nIt's tempting to join a call using the built-in mic in your laptop, but grab a set of headphones instead. \nThey help eliminate interference and background noise for others on the call, making the conversation flow more smoothly.\n\nWhen you're preparing for your call, allow yourself a few minutes to test your audio and video, especially if it's the first time you've used that video conferencing tool. \n\nAnother equipment misstep that happens often, particularly in companies with a mix of in-office and remote\nemployees, is what we call “hybrid calls.” A [hybrid call](/handbook/communication/#hybrid-calls-are-horrible)\n is when two (or more) people in one room try to share the same equipment during a call\n – laptops, cameras, even headphones. 
Not only does this create a negative and non-inclusive\n  experience for anyone who’s not in the room, it rarely works well for the people sharing the equipment.\n\nDo your remote team members a favor: Use your own laptop, camera, and headphones (and\npreferably, your own conference room) so that you can talk, screen share, take notes, and be seen clearly.\n\n## 3. Turn on your video\n\nOne of the best aspects of video calls is that they allow us to have high-fidelity conversations without being in the same location. \nBut if you don't use your camera, it's tough to get to know the person you're meeting with. \nThis is especially important at GitLab or any all-remote company, since we only get together in person every\nso often. \n\nWhile it's certainly not required, we encourage team members to default to using their cameras whenever possible.\nWhether you just came back from the gym, you’re eating lunch at your desk, or your dog,\nspouse, or child is in the room (have them wave!), still consider turning on your camera.\nThese are all typical parts of a remote workday, and might even spark a conversation that\nhelps you get to know a member of your team better.\n\n## 4. Speak up\n\nIt might go against your instincts around meeting etiquette, but (politely) speaking up or\neven interrupting someone on a video call is perfectly okay.\n\nThis takes some getting used to because the latency on video calls means you may be\ntalking over someone for longer than you would in person. But you can’t have a dynamic,\ncollaborative meeting unless people are able to contribute, ask questions, and add context in the moment.\n\nIf you’re on a call and you notice a team member who appears to be struggling to get a word in,\ndon’t hesitate to specifically invite them into the conversation so that they have a\nchance to speak as well. Your call will be more productive if everyone feels able to participate.\n\n## 5. 
Watch the clock\n\nIt’s hard to decide which is more important: starting a call on time or ending it on time.\nSo we aim for both. A meeting that runs even two or three minutes over can put someone’s entire schedule behind.\n\nIf your team regularly struggles to end on time, try assigning someone ahead of each meeting to\nbe the time keeper and give everyone a heads up when the call is almost over. If you weren’t\nable to get through your whole agenda in the allotted time, either schedule an additional call,\nor continue to communicate about it asynchronously instead.\n\n___\n\nLearn more about GitLab’s approach to [all-remote work](https://about.gitlab.com/company/culture/all-remote/).\nInterested in joining our team? Browse our [vacancies](https://about.gitlab.com/jobs/).\n\nCover image by [Trust \"Tru\" Katsande](https://unsplash.com/@iamtru?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[832,9,811],{"slug":6361,"featured":6,"template":680},"tips-for-mastering-video-calls","content:en-us:blog:tips-for-mastering-video-calls.yml","Tips For Mastering Video Calls","en-us/blog/tips-for-mastering-video-calls.yml","en-us/blog/tips-for-mastering-video-calls",{"_path":6367,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6368,"content":6373,"config":6378,"_id":6380,"_type":14,"title":6381,"_source":16,"_file":6382,"_stem":6383,"_extension":19},"/en-us/blog/tips-for-working-from-home-remote-work",{"title":6369,"description":6370,"ogTitle":6369,"ogDescription":6370,"noIndex":6,"ogImage":1669,"ogUrl":6371,"ogSiteName":667,"ogType":668,"canonicalUrls":6371,"schema":6372},"How to live your best remote life","GitLab team members offer their best advice on working from home (and it might surprise you).","https://about.gitlab.com/blog/tips-for-working-from-home-remote-work","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"How to live your best remote life\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jarka Košanová et al\"}],\n        \"datePublished\": \"2019-07-09\",\n      }",{"title":6369,"description":6370,"authors":6374,"heroImage":1669,"date":6375,"body":6376,"category":808,"tags":6377},[1128],"2019-07-09","\nIf there’s one thing GitLab team members ought to be experts at by now it’s [how to work from home](/company/culture/all-remote/).\n\nThat’s why we asked for your single best [work-from-home](/blog/eliminating-distractions-and-getting-things-done/) tip. The answers – involving cars, snacks, clothing, exercise, and the importance of a closed door – just might surprise you.\n\n## The definition of done\n\nThis well-known software development concept applies equally to working at home. [Jarka Košanová](/company/team/#jajina_k), backend engineer, stresses the importance of flexibility when it comes to deciding when to end the work day. “Many people who start working remotely have a problem recognizing they should stop working for the day. It is easy to advise setting a time when you finish your work in the same way as if you were in an office. But then you kind of lose the flexibility working from home is about. What helped me was my husband returning home from his work. If I had a day without any big break I knew it was time to finish my work as well. If I had a day with a break, I knew, on the other hand, I still could work a bit more and it would be ok.”\n\n## Start your engines\n\nIf you’ve been used to a commute as the first part of your day, this tip senior content editor [Valerie Silverthorne](/company/team/#valsilverthorne) borrowed from a friend is for you. “A work-at-home friend starts his day off by jumping in his car and driving around his neighborhood. When he pulls back in to his driveway, his ‘commute’ is complete and he’s ready to start his day.”\n\nOthers at GitLab have their own, perhaps more carbon-friendly, versions of this ritual. 
[Daniel Gruesso](/company/team/#danielgruesso), Configure product manager, has a good plan that involves a different kind of locomotion. “Getting out of the house before I start my day is very important for me. Either walking the dog or going for a swim to clear my head and get the blood flowing.”\n\n## Literally dressing for success\n\n### No PJs\n\nClothes make the person, even, apparently, in a work-from-home culture. No PJs for Secure frontend engineer [Sam Beckham](/company/team/#samdbeckham), at least. His top tip: “Getting dressed. It might be tempting to work in your pyjamas all day (and I occasionally still do) but getting dressed and presenting yourself as if you were to be going to an office job can go a long way towards getting you into a working mindset.”\n\n### Dress comfy\n\nOf course, there’s dressed, and then there’s dressed up, which is a significant difference according to [Heather Simpson](/company/team/#Heatherswall), senior external communications analyst, Security. “(I) agree, getting dressed is crucial for me… although I appreciate the attire I feel comfortable with wearing here at GitLab vs at my old company (where I worked remotely for 10 years). I now feel completely comfortable in a hoodie.”\n\n### Have a uniform\n\nContent marketing associate [Suri Patel](/company/team/#suripatel) takes a different tack with her clothing. She’s assembled a work uniform that draws a distinct line between time on and time off. “I have a hard time not thinking about work after I close my computer, so I have 10 black shirts (they were on sale), specific sweaters, and trousers that I only wear while working. The last thing I want to do is pair my favorite dress with a stressful project and be reminded of that while at the beach.”\n\n## A routine routine\n\nWe know we like [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions) and a lot of us really like/need/want a routine, particularly when it comes to working from home. 
[Carol Wainana](/company/team/#carowangar), service support agent, likes a routine. “Having a fixed routine that is time to wake up, time to start working, time for lunch and time to log off has been really beneficial for me.” And Heather agrees. “For me, a routine is helpful too – I start my day with coffee and checking out Twitter for interesting articles to read and/or share. This eases me into the day but still helps me stay informed and able to share thoughtful articles, etc., on the regular (mostly).”\n\nBut a routine doesn’t necessarily work for everyone, as [Tanya Pazitny](/company/team/#tpazitny), interim quality engineering manager, Secure & Enablement, points out. “I think you need to throw the concept of “nine to five” out the window and actively experiment to find what schedule lets you make the most of your time. I often find the midday slump to be so real, so if i'm feeling this way I step away for a while and then come back for a few hours in the evening when I generally feel supercharged.”\n\n## The magic of a door\n\nFor some of us work at home productivity starts with a closed door. That’s definitely true for Create senior backend engineer [Nick Thomas](/company/team/#nick.thomas). “(There need to be) clear signals to other inhabitants about whether you can be disturbed or not. When I'm in the spare room, the rule is simple – if the door is closed, do not come in.”\n\nHis other tip involves walking through the door and to somewhere else. “Also, I find it really helpful to work from ‘not home’ every now and again. A change is as good as a rest.”\n\n## The snack struggle is real\n\nTanya and [Mario de la Ossa](/company/team/#mdelaossa) both think remote work peril lies in the cupboard. “Keep junk food out of your house or you'll graze all day,” Tanya warns. 
Mario, backend engineer, Plan, agrees: “If I know there are snacks I WILL eat them, so I keep none in the house.”\n\n## The takeaway\n\nPerhaps [Brad Downey](/company/team/#TechBradD), strategic account leader, southern California, sums it up best: “Get dressed, have a proper work area (not the couch), and don’t eat lunch at your desk.”\n\nHave a great idea we didn’t mention? Leave it below and we’ll add it, and these, to the handbook.\n",[677,9,832],{"slug":6379,"featured":6,"template":680},"tips-for-working-from-home-remote-work","content:en-us:blog:tips-for-working-from-home-remote-work.yml","Tips For Working From Home Remote Work","en-us/blog/tips-for-working-from-home-remote-work.yml","en-us/blog/tips-for-working-from-home-remote-work",{"_path":6385,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6386,"content":6392,"config":6397,"_id":6399,"_type":14,"title":6400,"_source":16,"_file":6401,"_stem":6402,"_extension":19},"/en-us/blog/top-engineering-stories-gitlab",{"title":6387,"description":6388,"ogTitle":6387,"ogDescription":6388,"noIndex":6,"ogImage":6389,"ogUrl":6390,"ogSiteName":667,"ogType":668,"canonicalUrls":6390,"schema":6391},"These are your favorite GitLab engineering stories","From building a Web IDE, to our migration to GCP, to tracking down a bug in NFS – these are some of our most popular engineering blog posts.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681835/Blog/Hero%20Images/stairs_iteration.jpg","https://about.gitlab.com/blog/top-engineering-stories-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"These are your favorite GitLab engineering stories\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-01-07\",\n      }",{"title":6387,"description":6388,"authors":6393,"heroImage":6389,"date":6394,"body":6395,"category":743,"tags":6396},[672],"2021-01-07","\n\nSome of 
our most popular and enduring engineering stories show how we use GitLab technology to take small steps to achieve major upgrades, fixes, and integrations to improve upon GitLab features. These stories demonstrate one of our core values at GitLab, [iteration](https://handbook.gitlab.com/handbook/values/#iteration) – meaning we ship the smallest changes first. When it comes to building new features or introducing fixes at GitLab, our engineering team operates under the principle that incremental change drives the greatest value.\n\n## How we executed on milestone migrations\n\n### Azure to GCP\n\nAzure simply was not cutting it for hosting GitLab.com, and we decided it was time to migrate GitLab over to Google Cloud Platform (GCP). This was no small decision or endeavor, and we documented our end-to-end process publicly in the hopes that other companies might learn from our experience. [Read the blog post describing the migration to GCP](/blog/gitlab-journey-from-azure-to-gcp/), or watch the video below to learn more about this major migration.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Ve_9mbJHPXQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nNext, we explain how we analyzed data to see [how GitLab.com was performing on GCP after this major migration](/blog/gitlab-com-stability-post-gcp-migration/). Turns out, GitLab.com availability improved by 61% post-migration.\n\n### Upgrading PostgreSQL\n\nIn another blog post, one of GitLab.com’s main PostgreSQL clusters needed a major version upgrade. We knew it wouldn’t be easy, but in May 2020, we pulled off a [near-perfect execution of this substantial upgrade](/blog/gitlab-pg-upgrade/). 
We explain how the process unfolded, from planning to testing to full automation.\n\n### Moving to Kubernetes\n\n[Migrating GitLab.com over to Kubernetes](/blog/year-of-kubernetes/) was a painstaking and complex process. In one of our most popular blog posts last year, we share the trials and triumphs from the year after the migration.\n\n## Code detectives show their debugging work\n\nGitLab engineering fellow [Stan Hu](/company/team/#stanhu) explains [how debugging a bug in the Docker client library](/blog/tracking-down-missing-tcp-keepalives/) that was used in the GitLab runner taught him more about Docker, Golang, and even GitLab.\n\nBack in 2018, a customer flagged a bug in the NFS that the Support team escalated to Stan and his fellow engineers. It took _two weeks_ to hunt down the NFS bug that was disrupting the Linux kernel, and [Stan chronicles the intricacies of his investigation in this blog post](/blog/how-we-spent-two-weeks-hunting-an-nfs-bug/).\n\nAfter GitLab.com users reported getting the same, mysterious error message, our Scalability team rolled up their sleeves to figure out the origins of the message – and uncovered a complex problem.\n\n![Graph showing connection errors is part of the GitLab Scalability team's troubleshooting efforts](https://about.gitlab.com/images/blogimages/connectionerrorsgraph.png){: .shadow}\nGraph showing connection errors, grouped by second-of-the-minute, indicates a lot of clustering going on in the time dimension.\n{: .note .text-center}\n\nThere were [six key lessons we learned while debugging this scaling problem on GitLab.com](/blog/tyranny-of-the-clock/).\n\n## Using data for anomaly detection\n\nTwo years ago we switched over from our legacy NFS file-sharing service to Gitaly, and soon we noticed that our Gitaly service was lagging.\n\n![Graph showing lagging problems with Gitaly service](https://about.gitlab.com/images/blogimages/graph-01.png){: .shadow}\nWe noticed that the 99th percentile performance of the 
gRPC endpoint for Gitaly service had dropped from 400ms down to 100ms for an unknown reason.\n{: .note .text-center}\n\nThrough solid application monitoring, we were able to identify the problem and quickly fix it. [Unpack the process behind the Gitaly fix in this popular blog post](/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/).\n\nPrometheus reports on time-series data, which can be used for anomaly detection and alerting. [Learn how you can use this data to set up analysis and alerting with Prometheus](/blog/anomaly-detection-using-prometheus/) and use the code snippets to try it out in your own system.\n\n## Inside GitLab\n\nGitLab co-founder Dmitriy Zaporozhets built GitLab on Ruby on Rails, despite working mostly in PHP at the time. In this foundational blog post, our GitLab CEO, [Sid Sijbrandij](/company/team/#sytses), explains [why building on rails was the best decision for GitLab](/blog/why-we-use-rails-to-build-gitlab/).\n\nWe built our Web IDE to make it easier to edit code using GitLab. Explore [how we took the GitLab Web IDE from an experiment to working feature](/blog/introducing-gitlab-s-integrated-development-environment/).\n\n## The extensions and integrations that power us\n\n### How we built a VS Code extension\n\nAfter a survey revealed that VS Code was the most-used tool by our Frontend team, we decided to build a VS Code extension that works with GitLab. Learn [how we built the VS Code extension](/blog/gitlab-vscode-extension/) in a series of iterations.\n\nSoon, we found out our VS Code extension was very popular. So we wrote a blog post explaining [how users can develop their own extensions with VS Code and GitLab](/blog/vscode-extension-development-with-gitlab/).\n\n### Challenges with Elasticsearch\n\nElasticsearch enables global code search on GitLab.com and would allow us to run advanced syntax search and advanced global search of our codebase. 
But we ran into trouble with GitLab’s integration with Elasticsearch and [hit some dead ends on our first attempt to initiate the integration](/blog/enabling-global-search-elasticsearch-gitlab-com/). We recalibrated, learned from our mistakes, and [made a second attempt at the integration](/blog/elasticsearch-update/) a few months later.\n\n### Dogfooding at GitLab\n\nThe engineering productivity team at GitLab built Insights to examine trends in the GitLab.com issue tracker at a high-level, but soon realized Insights could be useful to our GitLab Ultimate users. Watch the video below or [read the blog post to explore the origins of Insights](/blog/insights/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/kKnQzS9qorc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### How we reimagined the technical interview\n\nThe trouble with technical interviews is that they rarely reflect the job you’re interviewing for. Learn how former GitLab team member, Clement Ho, [reimagined the technical interview for Frontend engineers](/blog/the-trouble-with-technical-interviews/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/dNABW84sTzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## How troubleshooting and security modeling can prevent disaster\n\nIn a major feat of coordination, our globally distributed engineering team managed to work synchronously to troubleshoot an issue with our Hashicorp Consul, successfully avoiding any significant problems, including the outage we anticipated. 
Read \"[The consul outage that never happened](/blog/the-consul-outage-that-never-happened/)\" to learn how they did it.\n\nOur Red team at GitLab is continually searching for vulnerabilities, big and small, and introduces patches to make it function. In one of our most popular 2020 posts, [our security team explains how an attacker who already gained unauthorized access to the cloud platform might be able to take advantage of GCP privileges](/blog/plundering-gcp-escalating-privileges-in-google-cloud-platform/), and how replicating this breach scenario could help you prevent this from happening on your GCP instance.\n\n**Did we miss something?** Share a link to your favorite GitLab engineering story below and [check out our round-up of some of our top stories about how to apply GitLab technology](/blog/gitlab-for-cicd-agile-gitops-cloudnative/).\n\nCover image by [Jamie Saw](https://unsplash.com/@jsclick?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/series-of-stairs?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,231],{"slug":6398,"featured":6,"template":680},"top-engineering-stories-gitlab","content:en-us:blog:top-engineering-stories-gitlab.yml","Top Engineering Stories Gitlab","en-us/blog/top-engineering-stories-gitlab.yml","en-us/blog/top-engineering-stories-gitlab",{"_path":6404,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6405,"content":6411,"config":6416,"_id":6418,"_type":14,"title":6419,"_source":16,"_file":6420,"_stem":6421,"_extension":19},"/en-us/blog/troubleshoot-delays-with-code-review-analytics",{"title":6406,"description":6407,"ogTitle":6406,"ogDescription":6407,"noIndex":6,"ogImage":6408,"ogUrl":6409,"ogSiteName":667,"ogType":668,"canonicalUrls":6409,"schema":6410},"Troubleshoot delays with our Code Review Analytics tool","Introduced in GitLab 12.7, Code Review Analytics can help you dig deeper into slow-moving merge 
requests.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681140/Blog/Hero%20Images/code_review_analytics.png","https://about.gitlab.com/blog/troubleshoot-delays-with-code-review-analytics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Troubleshoot delays with our Code Review Analytics tool\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Ward\"}],\n        \"datePublished\": \"2020-03-18\",\n      }",{"title":6406,"description":6407,"authors":6412,"heroImage":6408,"date":6413,"body":6414,"category":675,"tags":6415},[1066],"2020-03-18","\n\nModern software development moves fast. Development teams can fix issues and have a release deployed to customers within minutes, all fed through a continuous testing, build, and deployment process. Thanks to containerization, development teams can experiment with new techniques and technologies for particular application services, without affecting an application as a whole.\n\nBut with this speed and potential pace of change, it's easy to lose sight of what matters: Are any of these changes important for customers and their needs, or do they bring any business value? This is what [Value Stream Analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) hopes to answer.\n\nDrawing on lessons learned from the lean movement, delivery teams, engineering managers, and directors are applying \"[value stream mapping](https://en.wikipedia.org/wiki/Value-stream_mapping)\" to understand and optimize delivery of value, from the time an idea is born to its impact on the business in production. 
GitLab's analytics capabilities provide near real-time insights into the flow of value through the team's value stream without requiring complex system integrations, configuration, or add-on tools.\n\nAt the highest project overview level Value Stream Analytics ([since GitLab 12.3](/releases/2019/09/22/gitlab-12-3-released/#analytics-workspace), and previously called \"cycle analytics\") helps measure the velocity of development cycles in your team, and the time it takes them from planning to monitoring for each project. Currently, it tracks the seven stages to make calculations, and the associated feature:\n\n-   **Issue (Tracker)**: Time to schedule an issue (by milestone or by adding it to an issue board)\n-   **Plan (Board)**: Time to first commit\n-   **Code (IDE)**: Time to create a merge request\n-   **Test (CI)**: Time it takes GitLab CI/CD to test your code\n-   **Review (Merge Request/MR)**: Time spent on code review. Measures the median time taken to review the merge request that has the closing issue pattern, between its creation and until it's merged.\n-   **Staging (Continuous Deployment)**: Time between merging and deploying to production\n-   **Total (Total)**: Total lifecycle time. That is the velocity of the project or team. Previously known as production.\n\nIf the Value Stream Analytics feature shows that reviews are your team's most time-consuming step or your team agrees that code review is moving too slowly, then it's time to dig deeper. [GitLab 12.7](/releases/2020/01/22/gitlab-12-7-released/#code-review-analytics) introduced [Code Review Analytics](https://docs.gitlab.com/ee/user/analytics/code_review_analytics.html) to help you dig deeper into slow-moving merge requests and understand what is causing delays. \n\nIn our 2019 and 2020 [developer surveys](/developer-survey/), delays in code review featured near the top of developer process painpoints. 
Code review is not as time consuming as testing (the unanimous winner in 2019 and 2020), but respondents acknowledged they need more help to speed up the code reviews. This initial release of Code Review Analytics is a first step toward providing greater insight into delays and bottlenecks during the code review process.\n\nYou can find the Code Review dashboard under the menu for your project, then _Project Analytics > Code Review_. The view is a table of open merge requests with at least one non-author comment, and review time is measured from the first non-author comment. You can also see a summary of the changes introduced by the merge request, the number of comments, commits, and the approvers needed. The default sort order is from the oldest merge request, but you can filter results using the search box above the table. By highlighting aged Code Reviews, teams are encouraged to complete work-in-process rather than picking up new items from the backlog and to dispose of the \"inventory\" waste of unmerged commits.\n\n![Code analytics dashboard](https://about.gitlab.com/images/blogimages/code_review_analytics.png){: .shadow.medium.center}\n\nClicking the title of the merge request takes you to a normal merge request view where you can recap the discussions and activity so far to debug problems such as:\n\n-   If there are many comments or commits, perhaps the code is too complex.\n-   If a particular author is involved, maybe more training is required.\n-   If no or few comments and approvers appear, your team may be understaffed or may be in the habit of starting new work instead of assisting teammates to close MRs and deliver features.\n\nWe will be bringing improvements and more features to code review analytics over the coming months, and in the meantime we welcome your feedback.\n\n### About the guest author\n\n_Chris is a freelance technical communicator for numerous developer-focused companies. 
Happy creating text, videos, courses, and interactive learning experiences, in his spare time he writes games and interactive fiction._\n",[4454,677,9],{"slug":6417,"featured":6,"template":680},"troubleshoot-delays-with-code-review-analytics","content:en-us:blog:troubleshoot-delays-with-code-review-analytics.yml","Troubleshoot Delays With Code Review Analytics","en-us/blog/troubleshoot-delays-with-code-review-analytics.yml","en-us/blog/troubleshoot-delays-with-code-review-analytics",{"_path":6423,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6424,"content":6430,"config":6435,"_id":6437,"_type":14,"title":6438,"_source":16,"_file":6439,"_stem":6440,"_extension":19},"/en-us/blog/two-questions-we-ask-ux-designers-in-job-interviews",{"title":6425,"description":6426,"ogTitle":6425,"ogDescription":6426,"noIndex":6,"ogImage":6427,"ogUrl":6428,"ogSiteName":667,"ogType":668,"canonicalUrls":6428,"schema":6429},"2 Questions we ask UX designers in job interviews (and why)","UX designer interviews are quite simple at GitLab. There are no trick questions – but here are two 'basic' ones that tell us a lot about you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678794/Blog/Hero%20Images/ux-interviews.jpg","https://about.gitlab.com/blog/two-questions-we-ask-ux-designers-in-job-interviews","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2 Questions we ask UX designers in job interviews (and why)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2018-10-25\",\n      }",{"title":6425,"description":6426,"authors":6431,"heroImage":6427,"date":6432,"body":6433,"category":299,"tags":6434},[1897],"2018-10-25","\nAs of 2022, we have updated our internal interview process for Product Designers to include a consistent set of questions for every candidate at each phase of the process. 
This will help us create fairness and improve the quality of our evaluation process. The following questions are no longer part of our interview process. You can read about our [current hiring process in our Product Designer job family](https://handbook.gitlab.com/job-families/product/product-designer/#hiring-process). \n{: .alert .alert-info .note}\n\nWe won’t ask you how many golf balls fit in a bus or how many times a day a clock’s hands overlap – nothing like what Google became famous for. While there's some value in seeing how candidates react to curve-ball questions, they don't really add much to a 45-minute interview. We also won't ask you to attend an all-day session with a series of interviewers.\n\nI think the [hiring process](https://handbook.gitlab.com/job-families/product/product-designer/#hiring-process) at GitLab is way simpler and more efficient. A successful candidate has to go through four stages of interviewing before receiving an offer. Altogether, we spend around 2-3 hours with them, so we need to ask the right questions to be efficient.\n\nI'm so confident in the efficiency of these questions that I’m completely okay with sharing these publicly. What you answer matters less than how you answer them.\n\n## 1. Can you speak to the difference between information architecture, interaction design, usability, and user research?\n\nI was asked this when was interviewing for the Senior UX designer position at GitLab. I wasn’t expecting such a ‘basic’ question, but I immediately realized how ingenious it is.\n\nHere’s what’s so brilliant about it: We're testing if the candidate has solid foundations for being a UX designer. With enough experience, explaining these terms should be a piece of cake, whereas struggling can be a red flag. 
Even if a candidate doesn’t have a formal education, they should be able to provide descriptions with their own words and ideally throw in snippets from their past experience.\n\nWe don’t focus on the correctness of the answer so much as the body language and level of confidence the candidate shows when replying. Someone who’s not experienced in these UX basics can Google the terms before the interview and even prepare notes but we’ll pick that up. The lack of confidence will be obvious in their body language, their voice, and the words they use to describe the terms. Candidates who lack experience all tend to use similar, generic descriptions for these terms and seem to talk a lot, but don’t actually say much.\n\n> We don’t focus on the correctness of the answer so much as the body language and level of confidence the candidate shows when replying\n\n## 2. Pick an application you like/dislike and explain why.\n\nThis may seem like another basic question but it’s great for finding out what kind of a designer and person the candidate is. This is what we're looking for:\n\n### Passion\n\nWe’re interested in your opinion about the product as a designer, and we want to see if you talk about it with passion. If you love the product, the passion will be clear through the words you use to describe it and whether your eyes light up when you talk about it. The same applies for a product that you dislike: you should dislike it with passion.\n\nThis question tells us immediately if the candidate is passionate about being a designer or not. I’m often surprised at how many designers out there became designers only because it’s hip or well paid. These are not good reasons for becoming a designer – passion for creating things that improve people’s lives is.\n\n### Attention to detail\n\nWe want to see examples of candidates talking about small visual design and UI details; about seemingly insignificant but delightful UX solutions that can make a user’s day. 
The way a candidate talks about visual design gives us an insight into candidate’s skills in this area (what they notice, what they learn and how they use and adapt elements in their own work). We’re looking for well-rounded people who can cover the whole design process.\n\nIf they talk about things that aren’t good, we want to hear how they would improve them. Everyone can criticize; few can find good and feasible solutions. In most cases, I really don’t need to see the app that the candidate talks about. The way they describe it usually tells me enough to make a judgement. Good candidates describe things so well that I can imagine them without looking at the product.\n\n> Everyone can criticize; few can find good and feasible solutions\n\nCommunication in design work is key, so being able to accurately describe the problems or the delightful things in a product or an app is a good indicator of those skills.\n\n### User’s point of view\n\nAs a designer, you should always consider other users and how they experience things. This can be the crucial point of the interview. If you only describe the app from your point of view and based on your experience, it will be a potential red flag. You shouldn’t have to conduct user testing to imagine what other people could have problems with. For example: are certain UI elements or the font size really small? This could be a serious problem for older people or people with certain health conditions. Does the app behave consistently? If not, it could cause usability problems. These are the sorts of things that we want to hear our candidates talk about – empathy for users is key.\n\n### Bonus points\n\nI have to give bonus points to candidates that take the initiative and offer to share their screen or show me their phone to show me the app they talk about. 
The candidate is in a challenging moment, outside of their comfort zone, and it’s reassuring to see them take the initiative in such occasions.\n\n## Our interviews aren’t tricky\n\nIf you’re a passionate designer with an appropriate level of experience for the position, that will be clear from how you speak about design and how you think about user problems. I prefer to see passion and commitment to the design profession than a formal education and numerous years of experience in a non-challenging environment. We look for well-rounded and passionate people with a wide range of skills matching their experience. If you think you’re a good match, you’re welcome to [check out our careers page](/jobs/). We look forward to meeting you in our interviews. Good luck!\n\nCover image by [Kaleidico](https://unsplash.com/photos/26MJGnCM0Wc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/sketch?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,700,1698,810],{"slug":6436,"featured":6,"template":680},"two-questions-we-ask-ux-designers-in-job-interviews","content:en-us:blog:two-questions-we-ask-ux-designers-in-job-interviews.yml","Two Questions We Ask Ux Designers In Job Interviews","en-us/blog/two-questions-we-ask-ux-designers-in-job-interviews.yml","en-us/blog/two-questions-we-ask-ux-designers-in-job-interviews",{"_path":6442,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6443,"content":6448,"config":6453,"_id":6455,"_type":14,"title":6456,"_source":16,"_file":6457,"_stem":6458,"_extension":19},"/en-us/blog/unveiling-gitlabs-new-navigation",{"title":6444,"description":6445,"ogTitle":6444,"ogDescription":6445,"noIndex":6,"ogImage":5007,"ogUrl":6446,"ogSiteName":667,"ogType":668,"canonicalUrls":6446,"schema":6447},"Unveiling GitLab's new navigation","A whole new way to navigate.","https://about.gitlab.com/blog/unveiling-gitlabs-new-navigation","\n                        
{\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unveiling GitLab's new navigation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarrah Vesselov\"}],\n        \"datePublished\": \"2017-09-13\"\n      }",{"title":6444,"description":6445,"authors":6449,"heroImage":5007,"date":6450,"body":6451,"category":299,"tags":6452},[2667],"2017-09-13","\n\nIn 9.4 we took a big step toward [improving our navigation](/blog/redesigning-gitlabs-navigation/) here at GitLab. After several rounds of research and testing, we released our redesigned navigation under a feature flag. We chose this method so that we could continue implementing improvements discovered in our original research while gathering real-world feedback from our users.\n\n\u003C!-- more -->\n\n## We heard you!\n\nWe received an incredible number of responses in the [issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/34917) created to gather feedback. The feedback gave us valuable insight into the many different types of workflows our users have. It reaffirmed some of the decisions made and challenged us to rethink others. Using this feedback, we iterated on the navigation for two release cycles, focusing on the changes that would add the most benefit. Here are some of the high-level additions we made:\n\n### Collapsible sidebar and addition of icons – [#34028](https://gitlab.com/gitlab-org/gitlab-ce/issues/34028)\n\nFrom the beginning, we knew that the sidebar would need to be collapsible in order to maximize screen space. With the right sidebar present in issues and merge requests, we didn’t want to box you in. 
The addition of icons enabled us to collapse the sidebar down to a mere 50px.\n\n{: .text-center}\n![collapsible menu](https://about.gitlab.com/images/blogimages/unveiling-gitlabs-new-navigation/menu-loop.gif){: .shadow}\n\n### Flyout menu – [#34026](https://gitlab.com/gitlab-org/gitlab-ce/issues/34026)\n\nA fly-out menu has been introduced in order to reduce the number of clicks and the time necessary to access a sub-page. Now, if you want to access Issue Boards, there is no need to click on Issues and wait for the initial ‘Issue List’ to load. When hovering over a section with second-level items, the fly-out drop-down menu will appear to offer quick access to those second-level sections.\n\n{: .text-center}\n![flyout menu](https://about.gitlab.com/images/blogimages/unveiling-gitlabs-new-navigation/flyouts.png){: .shadow}\n\nWe've also adjusted the hover color of the menu items after many of you expressed that the intensity of the color was harsh and distracting. The colors changed from purple to whites and grays without sacrificing the overall contrast.\n\n### Dropdown links in top bar – [#35010]( https://gitlab.com/gitlab-org/gitlab-ce/issues/35010)\n\nNo more clicking on Projects and waiting for the Projects page to load! In order to provide quicker access to projects, a dropdown has been added to the Projects link in the top bar. The dropdown opens on click, following the behavior of the + button and personal dropdowns in the top bar.\n\n{: .text-center}\n![dropdown links](https://about.gitlab.com/images/blogimages/unveiling-gitlabs-new-navigation/dropdown-links.png){: .shadow}\n\nThe dropdown contains direct links to the different subsections of the Projects dashboard (Your Projects, Starred Projects and Explore projects). Better still, on the right-hand side of the dropdown is a list of your most frequently accessed projects. 
A search box allows you to navigate to your projects that are not present in the list.\n\n### Navigation color themes – [#35012](https://gitlab.com/gitlab-org/gitlab-ce/issues/35012)\n\nOn the subject of colors, one of the most requested features was the ability to change the navigation colors. Previous versions of GitLab allowed users to customize the navigation sidebar with a color theme. Many used this to differentiate between different GitLab instances. The new navigation presented the opportunity to bring back this valuable feature! The default palette will remain indigo, based on the GitLab identity. You will now be able to choose between four additional color schemes; Dark, Light, Blue, and Green.\n\n{: .text-center}\n![navigation color themes](https://about.gitlab.com/images/blogimages/unveiling-gitlabs-new-navigation/color-theme.png){: .shadow}\n\n### Improved breadcrumbs – [#35269](https://gitlab.com/gitlab-org/gitlab-ce/issues/35269)\n\nWe received a lot of feedback on the breadcrumbs. While many of you found them to be helpful, many also found them to be repetitive, inconsistent, and taking up too much overall space. We began by removing GitLab from the start of the breadcrumbs and moving all breadcrumb items onto one line. In order to improve the movement between elements in the breadcrumb, we replaced the slashes with chevrons. We also removed the action buttons from the breadcrumb bar altogether.\n\n{: .text-center}\n![action buttons moved](https://about.gitlab.com/images/blogimages/unveiling-gitlabs-new-navigation/action-remove.png){: .shadow}\n\nWhen multiple subgroups are present, we place them inside of an ellipsis button. This reduces the cognitive load while keeping them accessible. 
For each breadcrumb element, we have fixed the min-width and the max-width to make sure the whole breadcrumb contracts and expands according to the available space.\n\n{: .text-center}\n![breadcrumbs](https://about.gitlab.com/images/blogimages/unveiling-gitlabs-new-navigation/breadcrumbs.png){: .shadow}\n\nThe breadcrumb labels themselves are more consistent and intuitive. A list of the paths and corresponding breadcrumb titles can be found in the [issue description](https://gitlab.com/gitlab-org/gitlab-ce/issues/35269).\n\n### Reduce header height and redesign active/hover/dropdown styles – [#35424]( https://gitlab.com/gitlab-org/gitlab-ce/issues/35424)\n\nWe reduced the overall header height to give you as much vertical screen space as possible. By popular request, all global links are shown by default and collapse into the 'More' dropdown as space gets tighter. The header active/hover/dropdown styles have been redesigned with a bold new style and Todo/Issue/MR badges are centered to the icons themselves.\n\n{: .text-center}\n![active state](https://about.gitlab.com/images/blogimages/unveiling-gitlabs-new-navigation/active-states.png){: .shadow}\n\n{: .text-center}\n![notifications](https://about.gitlab.com/images/blogimages/unveiling-gitlabs-new-navigation/to-do.png){: .shadow}\n\n\n## Further iteration\n\nWe feel confident that GitLab’s overall navigation has been greatly improved over the last two releases. That is why, as of the 10.0 release, we will remove it from the feature flag and make it the only way to navigate. As always here at GitLab, everything is in draft. We will continue to monitor feedback, test, and iterate.\n\n## Upcoming efforts\n\nLooking forward, the UX team has some big things planned. In addition to improving user flows, we are working hard to increase the overall quality and polish of the UX experience. 
Stay tuned for a series of blog posts dedicated to explaining our processes as we work on the following key initiatives:\n\n- Change chromatic/full colors to a more harmonious palette [#28614](https://gitlab.com/gitlab-org/gitlab-ce/issues/28614)\n- Establish a proper type ramp to improve contrast and readability [#24310](https://gitlab.com/gitlab-org/gitlab-ce/issues/24310)\n- Iconography is a powerful visual cue to the user and should reflect our particular sense of style [#32894](https://gitlab.com/gitlab-org/gitlab-ce/issues/32894)\n- Architect design process for maintaining master files/symbols team-wide [#26](https://gitlab.com/gitlab-org/gitlab-design/issues/26)\n",[700,9],{"slug":6454,"featured":6,"template":680},"unveiling-gitlabs-new-navigation","content:en-us:blog:unveiling-gitlabs-new-navigation.yml","Unveiling Gitlabs New Navigation","en-us/blog/unveiling-gitlabs-new-navigation.yml","en-us/blog/unveiling-gitlabs-new-navigation",{"_path":6460,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6461,"content":6466,"config":6472,"_id":6474,"_type":14,"title":6475,"_source":16,"_file":6476,"_stem":6477,"_extension":19},"/en-us/blog/update-free-software-and-telemetry",{"title":6462,"description":6463,"ogTitle":6462,"ogDescription":6463,"noIndex":6,"ogImage":2010,"ogUrl":6464,"ogSiteName":667,"ogType":668,"canonicalUrls":6464,"schema":6465},"Update on free software and telemetry (Updated October 29th, 2019)","Telemetry services and GitLab. 
(GitLab CE will continue to be free software)","https://about.gitlab.com/blog/update-free-software-and-telemetry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update on free software and telemetry (Updated October 29th, 2019)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Scott Williamson\"}],\n        \"datePublished\": \"2019-10-10\",\n      }",{"title":6462,"description":6463,"authors":6467,"heroImage":2010,"date":6469,"body":6470,"category":299,"tags":6471},[6468],"Scott Williamson","2019-10-10","\n\n> **2019-10-29 UPDATE:** The following email is going out to all GitLab users: \n\n\nDear GitLab users and customers,\n\nOn October 23, we sent an email entitled “Important Updates to our Terms of Service and Telemetry Services” announcing upcoming changes. Based on considerable feedback from our customers, users, and the broader community, we reversed course the next day and removed those changes before they went into effect. Further, GitLab will commit to not implementing telemetry in our products that sends usage data to a third-party product analytics service. This clearly struck a nerve with our community and I apologize for this mistake.\n\nSo, what happened? In an effort to improve our user experience, we decided to implement user behavior tracking with both first and third-party technology. Clearly, our evaluation and communication processes for rolling out a change like this were lacking and we need to improve those processes. But that’s not the main thing we did wrong.\n\nOur main mistake was that we did not live up to our own core [value of collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) by including our users, contributors, and customers in the strategy discussion and, for that, I am truly sorry. 
It shouldn’t have surprised us that you have strong feelings about opt-in/opt-out decisions, first versus third-party tracking, data protection, security, deployment flexibility and many other topics, and we should have listened first.\n\nSo, where do we go from here? The first step is a retrospective that is happening on October 29 to document what went wrong. We are reaching out to customers who expressed concerns and collecting feedback from users and the wider community. We will put together a new proposal for improving the user experience and share it for feedback. We made a mistake by not collaborating, so now we will take as much time as needed to make sure we get this right. You can be part of the collaboration by posting comments in [this issue](https://gitlab.com/gitlab-com/www-gitlab-com/issues/5672). If you are a customer, you may also reach out to your GitLab representative if you have additional feedback.\n\nI am glad you hold GitLab to a higher standard. If we are going to be transparent and collaborative, we need to do it consistently and learn from our mistakes.\n\nSincerely,\n\nSid Sijbrandij\n\nCo-Founder and CEO\n\nGitLab\n\n\n> **2019-10-24 UPDATE**: We've heard your concerns and questions and have rolled back any changes to our Terms of Service. We’re going to process the feedback and rethink our approach. We will not activate user level product usage tracking on GitLab.com or GitLab self-managed before we address the feedback and re-evaluate our plan. We will make sure to communicate our proposed changes prior to any changes to GitLab.com or self-managed instances, and give sufficient time for people to provide feedback for a new proposal. We'll work in [this issue](https://gitlab.com/gitlab-com/www-gitlab-com/issues/5672).\n\nFour years ago, there was a [guest blog post and discussion about free software and GitLab](/blog/gitlab-gitorious-free-software/). 
That discussion has continued to inform GitLab’s free software philosophy for years and has served as a guiding light for making decisions on how we strike an appropriate balance in our [open core](/blog/thoughts-on-open-source/) strategy. On one hand, we [value results](https://handbook.gitlab.com/handbook/values/#results) and we believe an open core model is the best path to achieve that. It also means making our products better as fast as possible for our customers and users. On the other hand, we want to make sure that users who prefer using only free software can have a positive GitLab experience, as open source communities are important to GitLab.\n\nTo make GitLab better faster, we need more data on how users are using GitLab. SaaS telemetry products, which provide analytics on user behavior inside web-based applications, have come a long way in the past few years. They are an important tool for rapidly improving user experiences because you can understand what users are doing (or not doing) in the app. GitLab has a lot of features, and a lot of users, and it is time that we use telemetry to get the data we need for our product managers to improve the experience.\n\nMost of these tools use JavaScript snippets (similar to Google Analytics) that execute in the user’s browser and send information back to the telemetry service. While there are open source options, the leading commercial telemetry solutions often use proprietary JavaScript snippets. For a majority of users, disclosure of JavaScript usage in a privacy policy, along with describing how we are going to use the data, may be sufficient. But we also recognize that users who prefer only free software may have concerns.\n\nSo, we are planning some changes that I will describe below. But rest assured, a very important thing is not changing: [GitLab Community Edition](/install/ce-or-ee/) will continue to be free software with no changes. 
If you want to install your own instance of GitLab without proprietary software, GitLab Community Edition (CE) remains a great option, as it is licensed under the MIT License. Many open source software projects use GitLab CE for their SCM and CI needs, and nothing is changing with GitLab CE.\n\n## Planned changes\n\n[GitLab.com (GitLab’s SaaS offering)](/pricing/#gitlab-com) and [GitLab's proprietary Self-Managed packages (Starter, Premium, and Ultimate)](/pricing/#self-managed) will now include additional Javascript snippets (both open source and proprietary) that will interact with both GitLab and possibly third-party SaaS telemetry services (we will be using Pendo). We will disclose all such usage in our [privacy policy](/privacy/), as well as what we are using the data for. We will also ensure that any third-party telemetry service we use will have data protection standards at least as strong as GitLab, and will aim for SOC2 compliance (Pendo is SOC2 compliant).\n\nIn order to service the needs of GitLab.com and GitLab Self-Managed users who do not want to be tracked, both GitLab.com and GitLab Self-Managed will honor the [Do Not Track (DNT)](https://en.wikipedia.org/wiki/Do_Not_Track) mechanism in web browsers. This means that, if you turn on Do Not Track in your browser, GitLab will not load the JavaScript snippet. The only downside to this is that users may also not get the benefit of in-app messaging or guides that some third-party telemetry tools have that would require the JavaScript snippet.\nOverall, we believe these changes will continue to help us achieve results in improving our product experience for users, while also giving choice to users who only want free software. 
Please let us know your thoughts.\n\n",[675,9],{"slug":6473,"featured":6,"template":680},"update-free-software-and-telemetry","content:en-us:blog:update-free-software-and-telemetry.yml","Update Free Software And Telemetry","en-us/blog/update-free-software-and-telemetry.yml","en-us/blog/update-free-software-and-telemetry",{"_path":6479,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6480,"content":6486,"config":6491,"_id":6493,"_type":14,"title":6494,"_source":16,"_file":6495,"_stem":6496,"_extension":19},"/en-us/blog/use-gitlab-with-vscode",{"title":6481,"description":6482,"ogTitle":6481,"ogDescription":6482,"noIndex":6,"ogImage":6483,"ogUrl":6484,"ogSiteName":667,"ogType":668,"canonicalUrls":6484,"schema":6485},"How we created a GitLab Workflow Extension for VS Code","Now you can leverage GitLab from within Visual Studio Code with our official GitLab Workflow Extension.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681469/Blog/Hero%20Images/gitlab-vscode-blog-image2.jpg","https://about.gitlab.com/blog/use-gitlab-with-vscode","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we created a GitLab Workflow Extension for VS Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Roman Kuba\"}],\n        \"datePublished\": \"2020-07-31\",\n      }",{"title":6481,"description":6482,"authors":6487,"heroImage":6483,"date":6488,"body":6489,"category":743,"tags":6490},[4218],"2020-07-31","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-08-04.\n{: .alert .alert-info .note}\n\nThe people who work at GitLab are encouraged to build the things they want and need, which helps us expand the ways we work with our growing product. 
We're thrilled to announce that we've created an official GitLab Workflow Extension for VS Code.\n\n## How did we get here?\n\n[More than two years ago](/blog/gitlab-vscode-extension/), [Fatih Acet](https://gitlab.com/fatihacet), a former senior frontend engineer, [Plan](/handbook/engineering/development/dev/plan-project-management/), started working on a [VS Code extension](/blog/gitlab-vscode-extension/) to allow users to interact with GitLab from within their code editor. At GitLab, [everything starts with a Merge Request](/handbook/communication/#start-with-a-merge-request), which is exactly how Fatih started building the extension. Fatih, along with more than 25 contributors, continued to expand on the extension by adding new features. The extension has been installed more than 160,000 times.\n\nIt’s been remarkable to see the way the community collaborated to build the extension, making it a tool that is valuable to their work. The GitLab Workflow Extension is the perfect case study of how [developers can create meaningful work at this company](/direction/create/editor_extension/#where-we-are-headed).\n\nWhen Fatih decided to move on from GitLab in March 2020, we had an opportunity to take over the GitLab Workflow Extension, turning it into a tool GitLab would officially maintain and support. We jumped at the opportunity to maintain an auxiliary project outside of the main GitLab project. 
As we continue to move fast and create the best experiences possible for our users, we expect this extension to become a [key component of our strategy](/direction/create/editor_extension/#overview).\n\n## How to use the extension\n\nIf you want to start using the extension, you can install it from within VS Code directly by searching for [GitLab Workflow](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) which is now published through an official GitLab account.\n\nIf you were already using the extension, it automatically updated to the GitLab publisher, and you might have already seen a few updates coming in.\n\n## What improvements have we made?\n\nWhen we took over the extension, we worked with other teams across GitLab to immediately perform an [application security review](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/170). Along the way, we made sure to create a [security release-process](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/master/docs/security-releases.md). We did this to ensure that users were safe to continue using the extension and so that we could fix any problems that surface. We also worked through some automation to help with publishing the extension and [begin to lay a foundation for future testing](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/199).\n\nWe also shipped [version 3.0.0](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/master/CHANGELOG.md#v300-2020-06-25) which was spearheaded by our community and helped to resolve some long-standing bugs and issues. The extension has come a long way in just a few short weeks. We’re excited by the progress we’re making and the engagement we’re continuing to see, but there is still a lot that needs to be done.\n\n## What’s next?\n\nNothing in software development is perfect, and so we are aware of the shortcomings of the extension, some inconsistencies, and some long open feature requests. 
You can see our many to-dos on our GitLab Workflow Extension [issues list](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues). For now, we’re focused on triaging the existing issues and capturing any new bugs. You should see much more involvement from our [Create:Editor](/handbook/engineering/development/dev/create/ide/) team as we continue these efforts, and we’re looking forward to engaging with the community on these items.\n\nWe’re also evaluating the best path forward for maintaining the extension, by focusing on the test-suite and code-quality, so we won’t break things by accident. You can join us in our discussion on [this issue](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/204). While this might slow down some new feature releases in the short-term, we’re confident these are the right long-term decisions to ensure you have an extension you can trust, so you can make the GitLab Extension an integral part of your workflow.\n\n## Everyone can contribute\n\nThe extension is open source, and we're improving the \"[How to Contribute](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/master/CONTRIBUTING.md)\" guides alongside some other documentation. 
We want to have a space where everyone can contribute and make this extension better for all of us.\n\n## Check out more engineering content on GitLab\n\n- [How to build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild](/blog/aws-fargate-codebuild-build-containers-gitlab-runner/)\n- [How application security engineers can use GitLab to secure their projects](/blog/secure-stage-for-appsec/)\n- [Best practices to keep your Kubernetes runners moving](/blog/best-practices-for-kubernetes-runners/)\n\n\n## Read more on Visual Studio and GitLab:\n\n- [Four new tools for your Visual Studio Code and GitLab tool belt](/blog/vscode-workflow-new-features/)\n\n- [Visual Studio code editor: Eight tips for using GitLab VS Code](/blog/vscode-workflows-for-working-with-gitlab/)\n\n- [VS Code extension development with GitLab](/blog/vscode-extension-development-with-gitlab/)\n\n- [How to do GitLab merge request reviews in VS Code](/blog/mr-reviews-with-vs-code/)\n\n",[722,9],{"slug":6492,"featured":6,"template":680},"use-gitlab-with-vscode","content:en-us:blog:use-gitlab-with-vscode.yml","Use Gitlab With Vscode","en-us/blog/use-gitlab-with-vscode.yml","en-us/blog/use-gitlab-with-vscode",{"_path":6498,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6499,"content":6505,"config":6510,"_id":6512,"_type":14,"title":6513,"_source":16,"_file":6514,"_stem":6515,"_extension":19},"/en-us/blog/using-gitlab-ci-to-build-gitlab-faster",{"title":6500,"description":6501,"ogTitle":6500,"ogDescription":6501,"noIndex":6,"ogImage":6502,"ogUrl":6503,"ogSiteName":667,"ogType":668,"canonicalUrls":6503,"schema":6504},"How we used GitLab CI to build GitLab faster","Here's how we went from a daily manual merge of GitLab Core into GitLab Enterprise to automated merges every three 
hours.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665440/Blog/Hero%20Images/automate-ce-ee-merges.jpg","https://about.gitlab.com/blog/using-gitlab-ci-to-build-gitlab-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used GitLab CI to build GitLab faster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rémy Coutable\"}],\n        \"datePublished\": \"2018-05-02\",\n      }",{"title":6500,"description":6501,"authors":6506,"heroImage":6502,"date":6507,"body":6508,"category":743,"tags":6509},[5864],"2018-05-02","\n\nGitLab is an [open source project], but also a [commercial project]. For historic\nreasons, we have two Git repositories: [`gitlab-ce`] for GitLab Core and\n[`gitlab-ee`] for GitLab Enterprise packages (you can read [our recent blog post explaining GitLab self-managed tiers](/blog/gitlab-tiers/)).\nWhile we're working on having a [single codebase], we still need to regularly\nmerge [`gitlab-ce`] into [`gitlab-ee`] since most of the development happens on\nGitLab Core, but we also develop features on top of it for GitLab Starter, Premium, and Ultimate.\n\n## How we used to merge GitLab CE into GitLab EE\n\nUntil December 2017, the merge of [`gitlab-ce`] into [`gitlab-ee`] was manual\non a daily basis with basically the following commands ([see the full documentation]):\n\n```shell\n# the `origin` remote refers to https://gitlab.com/gitlab-org/gitlab-ee.git\n# the `ce` remote refers to https://gitlab.com/gitlab-org/gitlab-ce.git\ngit fetch origin master\ngit checkout -b ce-to-ee origin/master\ngit fetch ce master\ngit merge --no-ff ce/master\n```\n\nAt this point, since we'd merge a day's worth of GitLab Core's new commits,\nchances were good we'd see conflicts.\nMost of the time, the person responsible for this process would handle the\nconflict resolutions, commit them and push the `ce-to-ee` branch to GitLab.com.\n\nThere were a 
few problems with this approach:\n\n- GitLab's development pace is fast, which means the longer we go without a\n  merge, the more changes there are and thus more opportunities for conflicts\n- If we had many conflicts, it could take a significant amount of time for the\n  developer responsible for the merge\n- The developer performing the merge wasn't always the best person to resolve the\n  conflicts\n- Significant time was spent identifying and notifying developers to help resolve conflicts\n\n## The solution\n\nOur plan was to have a single script that would automate the merge, and in the\ncase of conflicts, identify the person best suited to resolve each of them.\nIt would then create the merge request using the [GitLab API] and a\n[GitLab API Ruby wrapper], and post a message in Slack when a new merge request\nwas created or an existing one was still pending.\n\nFinally, we'd use GitLab's [pipeline schedules] to run the script every three hours.\n\n### Step 1: Write the script\n\nWe chose to write the script in our [`release-tools`] project, since it already\nhad a strong foundation for working with the relevant Git repositories.\n\nThis script was written iteratively as a set of classes over the course of a few\nmonths:\n\n1. [Add the ability to find/create a merge request][!139]\n1. [Move remotes to the `Project` classes and get rid of the `Remotes` class][!168]\n1. [Add `head`, `status`, `log`, `fetch`, `checkout_new_branch`, `pull`, `push`, and `merge` to `RemoteRepository`][!177]\n1. 
[Introduce a new `CommitAuthor` class][!197]\n\nThe last piece of the puzzle was the new [`upstream_merge` Rake task][!219].\n\n### Step 2: Create a pair of SSH keys and add the public key to the `gitlab-ee` project\n\nUnder **Repository Settings > Deploy Keys** of the [`gitlab-ee`] project:\n\n![Deploy key in `gitlab-ee`](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step2.png){: .shadow.center.medium}\n\n### Step 3: Create secret variables in the `release-tools` project\n\nUnder **CI / CD Settings** of the [`release-tools`] project, create three secret\nvariables:\n\n- `AUTO_UPSTREAM_MERGE_BOT_SSH_PRIVATE_KEY` for the SSH private key\n- `GITLAB_API_PRIVATE_TOKEN` is a personal access token for our [`@gitlab-bot`]\n  user\n- `SLACK_UPSTREAM_MERGE_URL` which is the Slack webhook URL we created\n  specifically for this job and used in our [`Slack::UpstreamMergeNotification` class]\n\n![Secret variable](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step3.png){: .shadow.center.medium}\n\n### Step 4: Add a new CI job that runs the `upstream_merge` Rake task for pipeline schedules only\n\n*This was heavily inspired by [GitBot – automating boring Git operations with CI].*\n\nCreate a new `upstream-merge` CI job that:\n\n- Adds the SSH private key to the `~/.ssh` folder\n- Add `gitlab.com` to the `~/.ssh/known_hosts` file\n- Runs `bundle exec rake upstream_merge`\n\n![`upstream-merge` job](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step4.png){: .shadow.center.medium}\n\nYou can [check out the task for yourself](https://gitlab.com/gitlab-org/release-tools/blob/1cd437823113d4529919c29b177bb2037c19fc3c/.gitlab-ci.yml#L50-64).\n\n### Step 5: Create a pipeline schedule that runs every three hours\n\nUnder **Schedules** of the [`release-tools`] project:\n\n![Pipeline 
schedule](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step5.png){: .shadow.center.medium}\n\n### Step 6: Let the bot work for us!\n\n**The CI job:**\n\n![CI job](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step6-1.png){: .shadow.center.medium}\n\n**The Slack messages:**\n\n![Slack messages](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step6-2.png){: .shadow.center.medium}\n\n**The merge request:**\n\n![Merge request](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step6-3.png){: .shadow.center.medium}\n\n## What are the benefits?\n\nSince we started automating this process in December 2017, our dear\n[`@gitlab-bot`] created no fewer than [229 automatic merges], and we started\nnoticing the benefits immediately:\n\n- Automating the merge request creation saved developers time and removed a manual\nchore.\n- Automatically identifying the developer who introduced a conflict and assigning\nthem to resolve it spread out the workload and reduced bugs caused by improper\nconflict resolution.\n- Performing the merge automatically every three hours instead of manually once a\nday led to fewer changes at a time and a reduced number of conflicts.\n\nThe last, perhaps least visible, but most important benefit, is that we reduced\ndeveloper frustration and increased happiness by removing a tedious chore.\n\n[Photo](https://unsplash.com/photos/w6OniVDCfn0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Max Ostrozhinskiy on [Unsplash](https://unsplash.com/search/photos/build?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n[open source project]: /community/contribute/\n[commercial project]: /pricing/\n[`gitlab-ce`]: https://gitlab.com/gitlab-org/gitlab-ce\n[`gitlab-ee`]: https://gitlab.com/gitlab-org/gitlab-ee\n[single codebase]: 
https://gitlab.com/gitlab-org/gitlab-ee/issues/2952\n[see the full documentation]: https://gitlab.com/gitlab-org/release/docs/blob/master/general/merge-ce-into-ee.md\n[pipeline schedules]: https://docs.gitlab.com/ee/ci/pipelines/schedules.html\n[GitLab API]: https://docs.gitlab.com/ee/api/merge_requests.html\n[GitLab API Ruby wrapper]: https://rubygems.org/gems/gitlab\n[`release-tools`]: https://gitlab.com/gitlab-org/release-tools/\n[!139]: https://gitlab.com/gitlab-org/release-tools/merge_requests/139\n[!168]: https://gitlab.com/gitlab-org/release-tools/merge_requests/168\n[!177]: https://gitlab.com/gitlab-org/release-tools/merge_requests/177\n[!197]: https://gitlab.com/gitlab-org/release-tools/merge_requests/197\n[!219]: https://gitlab.com/gitlab-org/release-tools/merge_requests/219\n[`Slack::UpstreamMergeNotification` class]: https://gitlab.com/gitlab-org/release-tools/blob/1cd437823113d4529919c29b177bb2037c19fc3c/lib/slack/upstream_merge_notification.rb#L7\n[GitBot – automating boring Git operations with CI]: /2017/11/02/automating-boring-git-operations-gitlab-ci/\n[229 automatic merges]: https://gitlab.com/gitlab-org/gitlab-ee/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&label_name[]=CE%20upstream&author_username=gitlab-bot\n[`@gitlab-bot`]: https://gitlab.com/gitlab-bot\n",[9,1090],{"slug":6511,"featured":6,"template":680},"using-gitlab-ci-to-build-gitlab-faster","content:en-us:blog:using-gitlab-ci-to-build-gitlab-faster.yml","Using Gitlab Ci To Build Gitlab 
Faster","en-us/blog/using-gitlab-ci-to-build-gitlab-faster.yml","en-us/blog/using-gitlab-ci-to-build-gitlab-faster",{"_path":6517,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6518,"content":6524,"config":6529,"_id":6531,"_type":14,"title":6532,"_source":16,"_file":6533,"_stem":6534,"_extension":19},"/en-us/blog/using-gitlab-to-manage-house-renovation-priorities",{"title":6519,"description":6520,"ogTitle":6519,"ogDescription":6520,"noIndex":6,"ogImage":6521,"ogUrl":6522,"ogSiteName":667,"ogType":668,"canonicalUrls":6522,"schema":6523},"Using GitLab to project manage home renovation priorities","Solutions Architect Brendan O'Leary shares how he and his family use GitLab Issue Boards for an unconventional purpose: home improvement prioritization!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680235/Blog/Hero%20Images/home-improvement.jpg","https://about.gitlab.com/blog/using-gitlab-to-manage-house-renovation-priorities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using GitLab to project manage home renovation priorities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2018-02-08\",\n      }",{"title":6519,"description":6520,"authors":6525,"heroImage":6521,"date":6526,"body":6527,"category":299,"tags":6528},[3673],"2018-02-08","\n\nLast summer my wife and I bought a new house for our ever-growing family. Before we moved in, we had a couple of improvements made – wood floors to replace the aging carpet in the master bedroom, some required structural fixes. However, when we bought the house, we knew there would be a lot more we wanted to do over the years. 
When it came to organizing those ideas into things that need to happen sooner rather than later and those that could wait, however, we found ourselves struggling to keep all of the plans in order.\n\n\u003C!-- more -->\n\n## Trying to get organized\n\nI've been able to complete a few other projects since we moved in – but most were small in scale. A built-in shelf wall for my wife's office, painting and staining the new deck, and of course a DIY standing desk to use in my new office kitchen (which is also the house's kitchen... [working from home for the win!](/company/culture/all-remote/)). These projects were great, but we needed a way to organize and prioritize larger renovation projects.\n\n![Home improvement examples](https://about.gitlab.com/images/blogimages/home-improvement-examples.png){: .shadow}\n\n*\u003Csmall>Clockwise, from left: built-in shelf wall, painted and stained deck, DIY standing desk\u003C/small>*\n\nI was a GitLab user for years before I even became a GitLab team-member. I've always hosted my side-project code in GitLab.com since GitLab offers [unlimited private repositories](/pricing/#gitlab-com) for free. For project management in my \"day job\" I've used dozens of other tools outside of GitLab, so when I joined it was the first time I saw the full breadth of what GitLab offers in issue management.\n\nIn thinking about the other tools I've used in the past, they didn't seem to meet the full bar of what I was looking for to solve our problem. As a mother of four young children, my wife is always on the go... but I'm on a computer all day long. So we needed something that worked seamlessly between platforms. We also needed to be able to easily re-arrange and re-prioritize items. Also, I fancy myself a bit of a DIY-er, so I wanted to be able to label some items as at least *possible* for me to maybe complete myself. 
All of these requirements had me wondering what tool would be best for my wife and me to collaborate on.\n\n## Enter GitLab Issue Boards\n\nWith these requirements, and my newfound GitLab knowledge, I was able to come up with a novel solution to the problem we were having: why not use a [GitLab Issue Board](/stages-devops-lifecycle/issueboard/) to manage our ever-changing home renovation priorities?\n\nWith Issue Boards, we would have a fantastic solution for mobile and desktop (shout out to the [GitLab UX team](https://docs.gitlab.com/ee/development/ux_guide/)!). With [labels](https://docs.gitlab.com/ee/user/project/labels.html), I could organize and group issues however we wanted. And the customizable columns would allow us to prioritize, track and manage the various issues and ideas.\n\n## How the board works\n\nTo start, I [created a new group on GitLab.com](https://gitlab.com/groups/new) to house (pun intended) everything for our family. I made a project in that group called `priorities` to be the central place to collect all the renovation ideas we had. In the future, I may have a project for a specific renovation, managing purchases, and contractors, etc.\n\nAs with every GitLab project, issues and issue boards were baked right in. I started adding issues right away – beginning with those that were at the top of mind, like the water heater that is at the end of its usable life, repairs to our front entryway, and window replacement.  
My wife didn't have a GitLab.com account yet, but it was easy to add her to the project as a member just by putting her email address in on the member's page, allowing her to sign up and get access to the project in one step.\n\n![Invite member by e-mail](https://about.gitlab.com/images/blogimages/invite-member-by-email.png){: .shadow}\n\nTo get organized, I created a few labels: `P1` for top priority items, `DIY Possibility` for those I might be able to tackle on my own, and `Furniture` for those that involved furnishing various rooms. The labels will help filter issues so that if I find a free weekend, I can search for `DIY Possibility` issues to maybe get started on. Or if we go to a furniture store, we could filter to those issues to get an idea of cost while we are there.\n\nFor the board columns, I decided to use `P1` as the first column after Backlog to highlight those issues. From there, it's a matter of agreeing on an organization of priority 😃\n\n![Home improvement issue board](https://about.gitlab.com/images/blogimages/home-improvement-issue-board.png){: .shadow}\n\n## Where to go next\n\nNow it's time to execute! One thing we didn't account for in the first iteration was the scope of issues. Some things were relatively minor regarding time and investment. Others (like replacing all 27 windows!) are larger projects for which we need to budget. For this, we will be using [issue weight](https://docs.gitlab.com/ee/user/project/issues/issue_weight.html) to understand how different projects align with budget and time investment to pull off.\n\nIt's been an exciting experience using GitLab Issue Boards for something outside of the development space. We'd love to hear from you too about \"non-standard\" uses for GitLab's features. 
Feel free to comment on this post or tweet us [@GitLab](https://twitter.com/gitlab).\n\n*Cover photo by [George Pastushok](https://unsplash.com/photos/d0yNnTEjEWY?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)*\n{: .note}\n",[9,4630],{"slug":6530,"featured":6,"template":680},"using-gitlab-to-manage-house-renovation-priorities","content:en-us:blog:using-gitlab-to-manage-house-renovation-priorities.yml","Using Gitlab To Manage House Renovation Priorities","en-us/blog/using-gitlab-to-manage-house-renovation-priorities.yml","en-us/blog/using-gitlab-to-manage-house-renovation-priorities",{"_path":6536,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6537,"content":6542,"config":6548,"_id":6550,"_type":14,"title":6551,"_source":16,"_file":6552,"_stem":6553,"_extension":19},"/en-us/blog/using-run-parallel-jobs",{"title":6538,"description":6539,"ogTitle":6538,"ogDescription":6539,"noIndex":6,"ogImage":4174,"ogUrl":6540,"ogSiteName":667,"ogType":668,"canonicalUrls":6540,"schema":6541},"How we used parallel CI/CD jobs to increase our productivity","GitLab uses parallel jobs to help long-running jobs run faster.","https://about.gitlab.com/blog/using-run-parallel-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used parallel CI/CD jobs to increase our productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Miguel Rincon\"}],\n        \"datePublished\": \"2021-01-20\",\n      }",{"title":6538,"description":6539,"authors":6543,"heroImage":4174,"date":6545,"body":6546,"category":743,"tags":6547},[6544],"Miguel Rincon","2021-01-20","\n\nAt GitLab, we must verify simultaneous changes from the hundreds of people that contribute to GitLab each day. How can we help them contribute efficiently using our pipelines?\n\nThe pipelines that we use to build and verify GitLab have more than 90 jobs. 
Not all of those jobs are equal. Some are simple tasks that take a few seconds to finish, while others are long-running processes that must be optimized carefully.\n\nAt the time of this writing, we have more than 700 [pipelines running](https://gitlab.com/gitlab-org/gitlab/-/pipelines?page=1&scope=all&status=running). Each of these pipelines represent changes from team members and contributors from the wider community. All GitLab contributors must wait for the pipelines to finish to make sure the change works and integrates with the rest of the product. We want our pipelines to finish as fast as possible to maintain the productivity of our teams.\n\nThis is why we constantly monitor the duration of our pipelines. For example, in December 2020, successful merge request pipelines had a duration of [53.8 minutes](/handbook/engineering/quality/performance-indicators/#average-merge-request-pipeline-duration-for-gitlab):\n\n![Average pipeline duration was 53.8 minutes in December](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/historical-pipeline-duration.png){: .shadow.medium.center}\nThe average pipeline took 53.8 minutes to finish in December.\n{: .note.text-center}\n\nGiven that we run [around 500 merge request pipelines](https://gitlab.com/gitlab-org/gitlab/-/pipelines/charts) per day, we want to know: Can we optimize our process to change how long-running jobs _run_?\n\n## How we fixed our bottleneck jobs by making them run in parallel\n\nThe `frontend-fixtures` job uses `rspec` to generate mock data files, which are then saved as files called \"fixtures\". 
These files are loaded by our frontend tests, so the `frontend-fixtures` must finish before any of our frontend tests can start.\n\n> As not all of our tests need these frontend fixtures, many jobs use the [`needs` keyword](https://docs.gitlab.com/ee/ci/yaml/#needs) to start before the `frontend-fixtures` job is done.\n\nIn our pipelines, this job looked like this:\n\n![The `frontend-fixtures` job](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job.png){: .shadow.medium.center}\nInside the frontend fixtures job.\n{: .note.text-center}\n\n\nThis job had a normal duration of 20 minutes, and each individual fixture could be generated independently, so we knew there was an opportunity to run this process in parallel.\n\nThe next step was to configure our pipeline to split the job into multiple batches that could be run in parallel.\n\n## How to make frontend-fixtures a parallel job\n\nFortunately, GitLab CI provides an easy way to run a job in parallel using the [`parallel` keyword](https://docs.gitlab.com/ee/ci/yaml/#parallel). In the background, this creates \"clones\" of the same job, so that multiple copies of it can run simultaneously.\n\n**Before:**\n\n```yml\nfrontend-fixtures:\n  extends:\n    - .frontend-fixtures-base\n    - .frontend:rules:default-frontend-jobs\n```\n\n**After:**\n\n```yml\nrspec-ee frontend_fixture:\n  extends:\n    - .frontend-fixtures-base\n    - .frontend:rules:default-frontend-jobs\n  parallel: 2\n```\n\nYou will notice two changes. 
First, we changed the name of the job, so our job is picked up by [Knapsack](https://docs.knapsackpro.com/ruby/knapsack) (more on that later), and then we add the keyword `parallel`, so the job gets duplicated and runs in parallel.\n\nThe new jobs that are generated look like this:\n\n![Our fixtures job running in parallel](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job-parallel.png){: .shadow.medium.center}\nThe new jobs that are picked up by Knapsack and run in parallel.\n{: .note.text-center}\n\nAs we used a value of `parallel: 2`, actually two jobs are generated with the names:\n\n- `rspec-ee frontend_fixture 1/2`\n- `rspec-ee frontend_fixture 2/2`\n\nOur two \"generated\" jobs, now take three and 17 minutes respectively, giving us an overall decrease of about three minutes.\n\n![Two parallel jobs in the pipeline](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job-detail.png){: .shadow.medium.center}\nThe parallel jobs that are running in the pipeline.\n{: .note.text-center}\n\n## Another way we optimized the process\n\nAs we use Knapsack to distribute the test files among the parallel jobs, we were able to make more improvements by reducing the time it takes our longest-running fixtures-generator file to run.\n\nWe did this by splitting the file into smaller batches and optimizing it, so we have more tests running in parallel, which shaved off an additional [~3.5 minutes](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47158#note_460372560).\n\n## Tips for running parallel jobs\n\nIf you want to ramp up your productivity you can leverage `parallel` on your pipelines by following these tips:\n\n1. Measure the time your pipelines take to run and identify possible bottlenecks to your jobs. You can do this by checking which jobs are slower than others.\n1. 
Once your slow jobs are identified, try to figure out if they can be run independently from each other or in batches.\n   - Automated tests are usually good candidates, as they tend to be self-contained and run in parallel anyway.\n1. Add the `parallel` keyword, while measuring the outcome over the next few running pipelines.\n\n## Learn more about our solution\n\nWe discuss how running jobs in parallel improved the speed of pipelines on GitLab Unfiltered.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/hKsVH_ZhSAk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd here are links to some of the resources we used to run pipelines in parallel:\n\n- The [merge request that introduced `parallel` to fixtures](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46959).\n- An important [optimization follow-up](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47158) to make one of the slow tests faster.\n- The [Knapsack gem](https://docs.knapsackpro.com/ruby/knapsack), which we leverage to split the tests more evenly in multiple CI nodes.\n\nAnd many thanks to [Rémy Coutable](/company/team/#rymai), who helped me implement this improvement.\n\nCover image by [@dustt](https://unsplash.com/@dustt) on [Unsplash](https://unsplash.com/photos/ZqBNb7xK5s8)\n{: .note}\n",[9,1090,1293,1295,723],{"slug":6549,"featured":6,"template":680},"using-run-parallel-jobs","content:en-us:blog:using-run-parallel-jobs.yml","Using Run Parallel 
Jobs","en-us/blog/using-run-parallel-jobs.yml","en-us/blog/using-run-parallel-jobs",{"_path":6555,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6556,"content":6561,"config":6567,"_id":6569,"_type":14,"title":6570,"_source":16,"_file":6571,"_stem":6572,"_extension":19},"/en-us/blog/using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts",{"title":6557,"description":6558,"ogTitle":6557,"ogDescription":6558,"noIndex":6,"ogImage":5897,"ogUrl":6559,"ogSiteName":667,"ogType":668,"canonicalUrls":6559,"schema":6560},"Using web components to encapsulate CSS and resolve design system conflicts","How we used web component technologies like the Shadow DOM to make it easy to incrementally adopt our new design system, Slippers.","https://about.gitlab.com/blog/using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using web components to encapsulate CSS and resolve design system conflicts\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tyler Williams\"}],\n        \"datePublished\": \"2021-05-03\",\n      }",{"title":6557,"description":6558,"authors":6562,"heroImage":5897,"date":6564,"body":6565,"category":743,"tags":6566},[6563],"Tyler Williams","2021-05-03","\n\n## The goal: A new design for the GitLab blog\n\nIn March 2021, the [Digital Experience team](/handbook/marketing/digital-experience/) deployed a new and improved design for the GitLab blog. This design change affected more than 1,300 blog posts. It is the largest exercise to date for [our design system, Slippers](https://gitlab.com/gitlab-com/marketing/digital-experience/slippers-ui). It presented challenges due to the age and size of the GitLab blog. 
We wanted to live up to GitLab's [iteration value](https://handbook.gitlab.com/handbook/values/#iteration): \"Do the smallest thing possible and get it out as quickly as possible\".\n\n## The major challenge: Incrementally adopting a new design system with conflicting CSS\n\n[Slippers uses Tailwind CSS](https://gitlab-com.gitlab.io/marketing/inbound-marketing/slippers-ui/?path=/story/tailwind-css--page), which comes with its own set of base styles, called [Preflight](https://tailwindcss.com/docs/preflight). Preflight acts like normalizing styles (it's built on top of [modern-normalize](https://github.com/sindresorhus/modern-normalize)), which is useful for new projects, or projects making a full transition. In our case, Preflight is a hurdle because it has to work alongside our existing CSS.\n\nWe explored some out-of-the-box solutions, such as enabling the Tailwind [!important configuration](https://tailwindcss.com/docs/configuration#important), or using a [very specific selector strategy](https://tailwindcss.com/docs/configuration#selector-strategy).\n\nWe got very close to our desired outcome in both cases, but a problem remained:\n\nCritical legacy components required the old CSS. Those old styles were getting past `!important` and selector strategies because they applied to attributes we had not specified in our Tailwind utilities. Resolving those conflicts would take too much time and manual effort. We wanted a more [efficient](https://handbook.gitlab.com/handbook/values/#efficiency) solution, so we focused on two things: Identifying an ideal state for our CSS and finding a better CSS encapsulation. The goal was to prevent existing styles from affecting new components, and new styles from affecting old components.\n\n## The solution: CSS encapsulation with web components\n\n[Web component technologies](https://developer.mozilla.org/en-US/docs/Web/Web_Components) offered a compelling solution to the requirement that we use the old CSS. 
We used the [shadow DOM](https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_shadow_DOM) to encapsulate CSS. [Templates and slots](https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_templates_and_slots) allowed us to use existing HTML, ERB, and HAML templates. [Custom elements](https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_custom_elements) brought it all together.\n\nIn the [top-level blog template](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/sites/uncategorized/source/includes/cms/blog_post/slippers-blog-post.erb), we placed a [template tag for the blog post markup](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5667df8046398e47cb04d02fcc386420afc7ab57_35_28). The `template` tag is valid HTML – meaning our templating engine can process everything inside it. We can use `partial` tags and `yield` as expected and they become part of the template. The output below shows what that looks like (some classes omitted for brevity):\n\n```erb\n\u003Ctemplate id=\"slp-blog\">\n  \u003Cmain class=\"slpBlog\">\n    \u003Cheader class=\"slpBlog__header\">\n      \u003C%= partial \"includes/cms/blog_post/slp-blog-avatar\", locals: { author: author } %>\n      \u003C%= partial \"includes/cms/blog_post/slp-tags\" %>\n      \u003Chr/>\n    \u003C/header>\n    \u003Carticle class=\"slpBlog__article\">\n      \u003C% if current_page.data.image_title %>\n        \u003Cimg alt=\"\" src=\"\u003C%= current_page.data.image_title %>\" width=\"100%\"/>\n      \u003C% end %>\n      \u003C%= yield %>\n    \u003C/article>\n    \u003Caside class=\"slpBlog__aside\">\n      \u003C%= partial \"includes/cms/blog_post/slp-social-follow\" %>\n      \u003Cslot name=\"non-slippers-aside-items\">\u003C/slot>\n    \u003C/aside>\n    \u003Cfooter class=\"slpBlog__footer\">\n      \u003Chr/>\n      \u003C%= partial \"includes/cms/blog_post/slp-related-content\" %>\n      \u003Cslot 
name=\"non-slippers-footer-items\">\u003C/slot>\n      \u003Chr/>\n    \u003C/footer>\n  \u003C/main>\n\u003C/template>\n\u003Cscript src=\"/javascripts/slippers-blog.js\" type=\"text/javascript\">\u003C/script>\n```\n\nThe top-level template loads [`source/javascripts/slippers-blog.js`](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/source/javascripts/slippers-blog.js) inside the `body` of the document, which blocks rendering until the script finishes loading. `source/javascripts/slippers-blog.js` imports Slippers CSS as a variable [using webpack loader syntax](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5b5ceecb366e6e69e99e2bae290c68bae177fc17_0_2). With the CSS stored as a variable, we can inject it into the [custom element definition](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5b5ceecb366e6e69e99e2bae290c68bae177fc17_0_6).\n\nNext, we register `slp-blog` as a custom element. When the DOM parses the markup, it will either render the blog post template or, in the rare circumstance our JavaScript didn't load, it will fail. If it fails, we fall back to the [`yield` output in the light DOM](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5667df8046398e47cb04d02fcc386420afc7ab57_35_55) to make sure critical content is never lost. 
In these cases, our static site generator already rendered the template, so the images and text of the blog post remain accessible to the visitor.\n\nHere's what that JavaScript looks like:\n\n```js\nimport Vue from 'vue/dist/vue.min.js'\nimport Slippers from 'slippers-ui/dist/slippersComponents.common.js'\n\n// eslint-disable-next-line import/no-webpack-loader-syntax\nconst css = require(\"!raw-loader!sass-loader!../stylesheets/slippers.css.scss\").default;\n\n// Some event handlers and other requirements omitted for brevity\n\nexport function initializeSlippersWebComponent() {\n    if (window.customElements) {\n        customElements.define('slp-blog',\n            class extends HTMLElement {\n                constructor() {\n                    super();\n                    const template = document.getElementById('slp-blog').content;\n                    const shadowRoot = this.attachShadow({ mode: 'open' });\n                    shadowRoot.innerHTML = `\u003Cstyle>${css}\u003C/style>`;\n                    shadowRoot.appendChild(template.cloneNode(true));\n                }\n            });\n    }\n}\n```\n\nIf the script successfully loads, the light DOM content generated by our fallback `yield` statement is thrown away when the custom component is rendered. This is why we use an inline script tag beforehand - to avoid a [flash of unstyled content](https://en.wikipedia.org/wiki/Flash_of_unstyled_content#:~:text=A%20flash%20of%20unstyled%20content,before%20all%20information%20is%20retrieved.).\n\nFinally, we can use [slots to render non-Slippers items](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5667df8046398e47cb04d02fcc386420afc7ab57_40_59). 
Slotted elements get CSS from the light DOM, so our preexisting [partials and other included templating](https://gitlab.com/gitlab-com/www-gitlab-com/-/tree/master/sites/uncategorized/source/includes/blog) will still work as expected.\n\nOur custom element and its slots look something like this:\n\n```erb\n\u003Cslp-blog>\n  \u003C%= yield %>\n  \u003Cdiv slot=\"non-slippers-aside-items\">\n    \u003C%= partial \"includes/newsletter-signup.html\" %>\n  \u003C/div>\n  \u003Cdiv slot=\"non-slippers-footer-items\">\n    \u003C% unless current_page.data.install_cta == false %>\n      \u003C%= partial \"includes/blog/try\" %>\n    \u003C% end %>\n    \u003C% if ci_environment? %>\n      \u003C%= partial \"includes/blog/comments\" %>\n    \u003C% end %>\n  \u003C/div>\n\u003C/slp-blog>\n```\n\n## Results: Rapid iteration with minimal tradeoffs\n\nOur solution has some tradeoffs:\n\n1. We added complexity to the build process for our blog posts.\n1. Web components have wide browser support, but that's only a recent development. The best practices around these tools are still being debated.\n1. Technically, we added client-side rendering to our statically generated site, meaning we're giving up some of the static site benefits to achieve our CSS encapsulation.\n\nThose tradeoffs are worth it in the end. We achieved near-perfect CSS encapsulation which allowed us to iterate on Slippers and ship the blog template efficiently. We have reasonable fallbacks in place to preserve critical content for people who can't or won't load JavaScript to read our posts. 
Web components are the future, and we're excited to use them responsibly.\n",[1698,9],{"slug":6568,"featured":6,"template":680},"using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts","content:en-us:blog:using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts.yml","Using Web Components To Encapsulate Css And Resolve Design System Conflicts","en-us/blog/using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts.yml","en-us/blog/using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts",{"_path":6574,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6575,"content":6581,"config":6587,"_id":6589,"_type":14,"title":6590,"_source":16,"_file":6591,"_stem":6592,"_extension":19},"/en-us/blog/velocity-with-confidence",{"title":6576,"description":6577,"ogTitle":6576,"ogDescription":6577,"noIndex":6,"ogImage":6578,"ogUrl":6579,"ogSiteName":667,"ogType":668,"canonicalUrls":6579,"schema":6580},"How GitLab 14 satisfies the need for speed with modern DevOps","GitLab 14: Ship with velocity, ship with confidence","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682089/Blog/Hero%20Images/racecar_devops.jpg","https://about.gitlab.com/blog/velocity-with-confidence","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab 14 satisfies the need for speed with modern DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2021-07-29\",\n      }",{"title":6576,"description":6577,"authors":6582,"heroImage":6578,"date":6584,"body":6585,"category":743,"tags":6586},[6583],"Parker Ennis","2021-07-29","\n\n## How DevOps and NFS changed the game\n\nWhat if I told you that one of the best-selling racing video game franchises of all time, the \"Need For Speed\" (NFS), and DevOps have more in common with each other than you think? 
Yes, you read that correctly, probably not the NFS (Network File System) you were expecting.\n\n### An appetite for change\n\nFor context, the NFS series originally set out to redefine a saturated, yet unsophisticated, racing video game market. Motivated by an appetite for change, the NFS user experience reflected the human connection to real cars and how they behaved, which was a big challenge for developers in the 1990s. Nearly 30 years ago, \"The Need for Speed\" forever changed the landscape of racing games, selling 150 million copies since its debut.\n\n![The original Need For Speed game from 1994](https://about.gitlab.com/images/blogimages/need_for_speed.png){: .shadow.center}\nThe original Need For Speed video game set a new standard with an appetite for industry change.\n{: .note.text-center}\n\nCoincidentally, it was in 1994 that Grady Booch coined the term \"continuous integration\" (CI). Booch, like NFS, paved the way for immense industry growth in the realm of software development. CI aimed to redefine the manual, time-consuming development processes that paid little mind to how real humans and developers behaved and collaborated around application development by [leveraging automation to increase development speed without sacrificing quality](/topics/ci-cd/benefits-continuous-integration/).\n\nSimilar to how NFS took the racing scene by storm and laid the groundwork for the racing game genre, CI evolved into what is arguably the most important piece of DevOps best practices today: Continuous integration and continuous delivery (CI/CD).\n\nDevOps continues to evolve, but without CI/CD, DevOps isn't the collaborative practice that helps teams work faster and more efficiently. 
CI/CD is a super power within DevOps – unlocking the potential to ship apps with increased velocity and confidence in their quality, without having to choose one or the other.\n\n### DIY DevOps vs Modern DevOps\n\nToday, it doesn't matter what your business does, it's going to involve some amount of using and building software. DevOps gained traction in the age of digital transformation, where the rate of technical innovation acted as a forcing function for companies to fail or survive. Over the past 10 years or so, organizations had a choice to either embrace this \"need for speed\" and adopt DevOps practices, or be displaced by their competition.\n\nThis scramble led to a \"DIY\" style of DevOps that couldn't deliver on its promises much of the time. For many organizations, the biggest problem wasn't just the brittle toolchains composed of disparate pieces of software but also trying to make these complicated toolchains and processes benefit from DevOps. Since uprooting everything wasn't an option, the root of the problem was still there, and DevOps was hard to adopt.\n\nFor all the teams DevOps has helped, the DevOps marketplace must continuously improve and evolve as we learn more about the challenges of modernizing workflows. DevOps must modernize alongside businesses to ensure it's an accessible and realistic framework for as many companies as possible to leverage.\n\n### GitLab 14 fuels the modern DevOps need for speed\n\nWith a platform-driven approach, [GitLab 14](/releases/2021/06/22/gitlab-14-0-released/) delivers a consistent and efficient developer and operator experience that leads to a simplified and more predictable SDLC. A single user interface, embedded security, and a unified data store are just some of the features of a platform any company can use without the tradeoffs of the DIY DevOps past. By using one tool for source code management, CI, and CD, teams are more efficient and productive with streamlined collaboration. 
Engineers are happier when focused on value-add than when maintaining integrations – and happy developers help attract and retain talent.\n\n[GitLab 14](/gitlab-14/) ushers in a new era of modern DevOps as a global movement, and I'm excited to talk a little bit about some of its capabilities that help you ship software faster, with a higher degree of confidence, and improve your ability to respond to market changes.\n\n### Ship with velocity and confidence\n\n**1. [GitLab pipeline editor](/releases/2021/01/22/gitlab-13-8-released/#pipeline-editor)**\n\nCrafting pipelines can be complicated and verbose without an understanding of advanced pipeline syntax and how it fits within the workflow using the '.gitlab-ci.yml' configuration file. Needing to craft pipelines from scratch presents a steeper learning curve for organizations and teams with a less mature DevOps culture. The GitLab pipeline editor lowers the barrier to entry for CI/CD novices and accelerates power users with visual authoring and versioning, continuous validation, and pipeline visualization. Whether you're a more advanced user or novice, the pipeline editor unlocks additional power and usability.\n\n![Pipeline editor linting capability makes pipeline authoring easier](https://about.gitlab.com/images/blogimages/lint_ci.png){: .shadow.center}\nPipeline editor linting capability makes pipeline authoring easier and more efficient.\n{: .note.text-center}\n\nHere's what some of our wider community is saying about the pipeline editor:\n\n> \"I really like the direction of making CI/CD more accessible to first-time users and how GitLab rolls out this feature piece by piece.\" - Bernhard Knasmüller, computer scientist\n\n> \"This is going to improve the CI/CD configuration experience greatly!\" - Olivier Jourdan, developer\n\n**2. [GitLab Agent for Kubernetes](https://youtu.be/17O_ARVaRGo)**\n\nThe GitLab Agent for Kubernetes enables secure, cloud-native [GitOps](/solutions/gitops/). 
GitLab also meets customers where they are by supporting GitOps with agent-based and agentless approaches, and for deployments anywhere, regardless of whether infrastructure is cloud-native. It also enables alerts based on network policies for pull-based deployments.\n\nHere's a piece of feedback from the wider GitLab community on the Kubernetes Agent:\n\n> \"GitLab is leading the evolution of DevOps by optimising work efficiency and cloud-native integration capabilities. This enables the rapid delivery of digital value.\" - Vasanth Kandaswamy, Head of Data and Applications Portfolio, Fujitsu Australia\n\nWe look forward to iterating and improving these capabilities and always [welcome your feedback](/submit-feedback/#product-feedback) on our product.\n\n### What's next?\n\nOne thing is for sure: **people want to go fast,** but not when it requires sacrificing peace of mind and quality. We're committed to helping you ship with velocity and confidence by [investing in specific product areas](/direction/#fy22-product-investment-themes) to bring the benefits of modern DevOps to anyone using GitLab to deliver their applications.\n\n![Go fast with confidence](https://about.gitlab.com/images/blogimages/gofast.gif){: .shadow.center}\nEven Ricky Bobby from Talladega Nights agrees. People just want to go fast!\n{: .note.text-center}\n\nWe'll continue executing on our [vision for CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/4534) to create a visual pipeline authoring experience built right into GitLab that simplifies the complexity, letting you quickly create and edit pipelines while still exposing advanced options when you need them.\n\nWe're also committed to making sure you can deploy anytime and anywhere to take advantage of the benefits of Kubernetes, no matter where you are at on your cloud native development journey. 
If you have feedback or suggestions on what we can do better, please [let us know in our product epic.](https://gitlab.com/groups/gitlab-org/-/epics/3329)\n\nWe look forward to delivering you more value as we iterate upon this new era of GitLab 14 going forward and can't wait to see the great things you're creating with GitLab.\n\n_This blog is part three in a three-part series on the top capabilities of GitLab 14. Learn more about [how GitLab 14 prepares you for DevSecOps 2.0 in part one](/blog/are-you-ready-for-the-newest-era-of-devsecops/), and about [how to optimize DevOps with GitLab 14's enhanced visibility tools in part two](/blog/optimizing-devops-visibility-in-gitlab-14/)._\n\nCover image by [CHUTTERSNAP](https://unsplash.com/@chuttersnap) on [Unsplash](https://unsplash.com/photos/5Yo1P9ErikM)\n{: .note}\n",[1440,9,1090,1293,530],{"slug":6588,"featured":6,"template":680},"velocity-with-confidence","content:en-us:blog:velocity-with-confidence.yml","Velocity With Confidence","en-us/blog/velocity-with-confidence.yml","en-us/blog/velocity-with-confidence",{"_path":6594,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6595,"content":6601,"config":6606,"_id":6608,"_type":14,"title":6609,"_source":16,"_file":6610,"_stem":6611,"_extension":19},"/en-us/blog/version-12-year-in-review",{"title":6596,"description":6597,"ogTitle":6596,"ogDescription":6597,"noIndex":6,"ogImage":6598,"ogUrl":6599,"ogSiteName":667,"ogType":668,"canonicalUrls":6599,"schema":6600},"GitLab Version 12 Year In Review: Releases 12.0 to 12.10","Product highlights from a pivotal year","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680891/Blog/Hero%20Images/cloud-adoption-roadmap.jpg","https://about.gitlab.com/blog/version-12-year-in-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Version 12 Year In Review: Releases 12.0 to 12.10\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Brian Glanz\"}],\n        \"datePublished\": \"2020-05-21\",\n      }",{"title":6596,"description":6597,"authors":6602,"heroImage":6598,"date":6603,"body":6604,"category":675,"tags":6605},[2333],"2020-05-21","\n\nAt GitLab, we understand that the strength of your business depends on moving fast. And what makes us strong in good times, makes us resilient in challenging times.\n\nStrength and resilience come from speed, yes, but also agility, operational efficiency, security, and above all, reliability. We've released a new version on the 22nd of every month for [now more than 100 consecutive months](/releases/).\n\nAs we’ve grown, those monthly releases now include dozens of significant features and improvements: 719 new features and improvements total in versions 12.0–12.10, as documented in our [release blog posts](/releases/categories/releases/).\n\nWe’ll cover some of the highlights and trends here in this Version 12 Year in Review. Watch it in the video below, or read on for more detail and all the links.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"1141\" height=\"642\" src=\"https://www.youtube.com/embed/IXRuepeH3xg\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n*Here is the [Version 12 Year in Review slide deck](https://docs.google.com/presentation/d/1ht1da_WIrkG_4Cx5C89D4C7zxV80MIExUVwCyZD10aw/edit?usp=sharing) shown in the video.*\n\nGitLab is not just another startup. Over the past year, the awesome GitLab community [made almost 200 contributions to our open source software in every monthly release](https://gitlab.biterg.io/goto/937475d38035f496df3501c9b30af5ef). 
Community contributions to versions 12.0–12.10 totaled an incredible 2,158 🙌, and contributions per month/per version have grown steadily over versions 10, 11, and 12:\n\n![GitLab Monthly Community Contributions per Major Version](https://about.gitlab.com/images/blogimages/GitLabCommunityContributionsMonthlyPerMajorVersion.png){: .shadow}\n\nOur company and our community are truly working together to make [DevOps](/topics/devops/) a reality for teams of all sizes.\n\nLooking back, Version 12 has been about expanding our focus, first by treating developers, security, and operations alike as first-class citizens in DevOps. [We called 12.0 our DevSecOps release](/releases/2019/06/22/gitlab-12-0-released/) and we’ve delivered on that vision in the year since.\n\nGitLab has also grown to help more types of users contribute, such as by building in [Requirements Management](/releases/2020/04/22/gitlab-12-10-released/#create-and-view-requirements-in-gitlab) and [Design Management](/releases/2019/08/22/gitlab-12-2-released/#annotations-for-designs). As you have more people collaborating in a single application, features like our new [Compliance Dashboard](/blog/make-tracking-agreements-simple-compliance-dashboard/) are more useful, in that case making compliance easier for everyone.\n\n## Dev\n\n![Dev](https://about.gitlab.com/images/blogimages/GitLab-Dev.png){: .small.left.wrap-text}\n\nFor developers, it’s all about delivering and shipping faster while meeting business demands. 
Today, we provide granular analytics on merge requests, with [Productivity Analytics](/releases/2019/09/22/gitlab-12-3-released/#productivity-analytics) and [Code Review Analytics](/blog/troubleshoot-delays-with-code-review-analytics/), in addition to full, downloadable [Code Quality Reports](/releases/2020/03/22/gitlab-12-9-released/#full-code-quality-report) for even more visibility.\n\nBuilt-in package management now includes a [GitLab Conan repository](/releases/2019/12/22/gitlab-12-6-released/#manage-cc-packages-via-conan-within-gitlabs-package-registry) for C and C++ developers, and a [NuGet repository](/releases/2020/02/22/gitlab-12-8-released/#build-publish-and-share-packages-to-the-gitlab-nuget-net-repository) for Windows developers.\n\nAutomation is fundamental for DevOps teams, and with [Directed Acyclic Graphs](/releases/2019/08/22/gitlab-12-2-released/#directed-acyclic-graphs-dag-for-gitlab-pipelines) and [Parent-Child Pipelines](/releases/2020/01/22/gitlab-12-7-released/#parent-child-pipelines), complex pipelines are now faster and more flexible.\n\n## Sec\n\n![Sec](https://about.gitlab.com/images/blogimages/GitLab-Sec.png){: .small.right.wrap-text}\n\n[DevSecOps](/solutions/security-compliance/) is not only about shifting left with testing or security — it’s also increasing visibility downstream or “shifting right.” Security teams need to manage and mitigate business risk. 
To do that, they need visibility into development and what vulnerabilities are being created or discovered.\n\nYou can now easily access and export a project’s [Dependency List](/releases/2019/06/22/gitlab-12-0-released/#project-dependency-list) or “Bill of Materials.” Our [scorecard on Security Dashboards](/releases/2019/12/22/gitlab-12-6-released/#quickly-understand-your-at-risk-projects-with-project-security-grades) lets you know immediately which projects are most at risk.\n\nWe also have more efficient [vulnerability management](/releases/2020/03/22/gitlab-12-9-released/#select-and-dismiss-multiple-vulnerabilities) on security dashboards and [autoremediation of vulnerabilities found in Container Scanning](/releases/2020/03/22/gitlab-12-9-released/#suggested-solution-for-container-scanning). Our new [Container Network Security](/releases/2020/02/22/gitlab-12-8-released/#network-policies-for-container-network-security) helps prevent lateral attacks.\n\n## Ops\n\n![Ops](https://about.gitlab.com/images/blogimages/GitLab-Ops.png){: .small.left.wrap-text}\n\nBut containers and applications don’t really run themselves. Application teams everywhere need to plan and architect for stability and efficiency.\n\nEnvironments become hard to wrangle when you have more than just a few, and our [Environments Dashboard](/releases/2019/11/22/gitlab-12-5-released/#environments-dashboard) lets you see what’s going on across projects. For teams with a high volume of merges, [Merge Trains](/releases/2019/07/22/gitlab-12-1-released/#parallel-execution-strategy-for-merge-trains) help mitigate potential conflicts in production pipelines.\n\nManaging deploy tokens is now more efficient, as we introduced both [group-level deploy tokens](/releases/2020/03/22/gitlab-12-9-released/#group-deploy-tokens) and an API to administer them. 
You can now leverage [HashiCorp Vault](/releases/2020/03/22/gitlab-12-9-released/#secure-your-applications-with-secrets-management-and-vulnerability-remediation) to securely manage keys, tokens, and other secrets with Vault as a GitLab CI managed app.\n\nWe are strong believers in automating repetitive tasks. Creating a new cluster should be simple, and our [EKS integration](/releases/2019/11/22/gitlab-12-5-released/#easily-create-and-deploy-to-an-eks-cluster) does just that. Finally, our new [Log Explorer](/releases/2020/02/22/gitlab-12-8-released/#explore-aggregated-logs) aggregates Kubernetes logs across pods and services, making them searchable and much more useful.\n\n## GitLab is for Everyone\n\nWith Version 12, we can see GitLab is not only for developers, and it’s not even only for DevOps. Expect more progress in Version 13 and beyond, as we continue on [our mission to change all creative work from read-only to read-write](/company/#about-us).\n\nGitLab is for everyone, and everyone can contribute. 
[Join us, at GitLab.com](https://about.gitlab.com/).\n\nCover image by [Matt Howard](https://unsplash.com/@thematthoward?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/journey?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[677,267,9],{"slug":6607,"featured":6,"template":680},"version-12-year-in-review","content:en-us:blog:version-12-year-in-review.yml","Version 12 Year In Review","en-us/blog/version-12-year-in-review.yml","en-us/blog/version-12-year-in-review",{"_path":6613,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6614,"content":6620,"config":6625,"_id":6627,"_type":14,"title":6628,"_source":16,"_file":6629,"_stem":6630,"_extension":19},"/en-us/blog/watch-the-gitlab-summit-from-your-desk",{"title":6615,"description":6616,"ogTitle":6615,"ogDescription":6616,"noIndex":6,"ogImage":6617,"ogUrl":6618,"ogSiteName":667,"ogType":668,"canonicalUrls":6618,"schema":6619},"We're coming to you live from Crete, at the GitLab Summit!","Read on for all the events you can watch and participate in.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680330/Blog/Hero%20Images/greece-summit-2017.png","https://about.gitlab.com/blog/watch-the-gitlab-summit-from-your-desk","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're coming to you live from Crete, at the GitLab Summit!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2017-10-17\",\n      }",{"title":6615,"description":6616,"authors":6621,"heroImage":6617,"date":6622,"body":6623,"category":299,"tags":6624},[784],"2017-10-17","\n\nIt's that time again! Every nine months, our entire remote workforce descends on one location for the [GitLab Summit](/events/gitlab-contribute/). 
This year, we'll be in Crete, and you're invited!\n\n\u003C!-- more -->\n\nBefore you go off and buy a plane ticket, we should clarify that there probably isn't room for all of you on the island. But we're trying something new this Summit — we're live streaming every day to bring the experience to as many of our remote friends as possible.\n\n## Watch\n\nWe'll be streaming on [YouTube](https://rebrand.ly/gitlab-summit-stream). You can watch [Sid](/company/team/#sytses) and [Dmitriy](/company/team/#dzaporozhets)'s keynotes, our Santorini trip, [GitLab team-member-led user generated content (UGC) sessions](https://docs.google.com/forms/d/e/1FAIpQLSf9PSEMkxdlYQnAmDcXvsqeeXe-O1kRECZopG9nmwfn_O5qgA/viewform), the 10.1 release, and our final party can all be viewed in one place.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/95FuYdcziLQ\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\n## Schedule\n\nWe'll likely make some changes to this schedule as we close in on kickoff, so please keep in mind it's a WIP!\n\n#### [Thursday, October 19](https://www.youtube.com/watch?v=3EegHi0fdPQ)\n\nWatch Thursday's live stream [here](https://www.youtube.com/watch?v=3EegHi0fdPQ)\n\n* 10am-6pm UTC - Arrivals and getting to know GitLab team-members\n\n#### [Friday, October 20](https://www.youtube.com/watch?v=AopRnEbvgzE)\n\nWatch Friday's live stream [here](https://youtu.be/AopRnEbvgzE)\n\n* 7am UTC - Welcome & keynote with GitLab CEO Sid Sijbrandij ([@sytses](https://twitter.com/sytses))\n* 8am UTC - AMA with GitLab CEO Sid Sijbrandij (chat your questions here or on Twitter using #GitLabSummit)\n* 9am UTC - Eat lunch with us!\n* 10am UTC - Join us live for our Amazing Race challenge\n* 12pm UTC - How GitLab Started keynote with CTO & Co-founder Dmitriy Zaporozhets ([@dzaporozhets](https://twitter.com/dzaporozhets);chat your questions on YouTube or on Twitter using #GitLabSummit)\n* 1:15pm UTC - Award Ceremony and Happy Hour\n* 3pm UTC - GitLab 
BBQ\n\n#### Saturday, October 21\n\n**Due to WIFI issues, we were not able to live stream Saturday's events. However,\nwe'll be showing re-runs and highlight footage during [Sunday's stream](https://www.youtube.com/watch?v=95FuYdcziLQ).**\n\n* 4am - 5pm UTC Day trip to Santorini\n* 5:15 pm UTC - Join us for dinner and hallway conversations\n\n#### [Sunday, October 22](https://www.youtube.com/watch?v=95FuYdcziLQ)\n* 6-7am UTC - AMAs - Send us your questions ahead of time on Twitter with #GitLabSummit\n * 6-6:15am Mark Pundsack (@MarkPundsack), Head of Product\n * 6:15-6:30am Barbie Graver (@BarbieGraver), Chief Culture Officer\n * 6:30-6:45am Sarrah Vesselov (@SVesselov), UX Lead\n* 7am UTC - Chat with our developers and engineers as they release GitLab 10.1\n* 9am-3pm UTC - Day trip to Heraklion\n\n#### [Monday, October 23](https://www.youtube.com/watch?v=7r9mo-QwBbM)\n\n[Vote for the UGC Sessions you want to see on the live stream!](https://docs.google.com/forms/d/e/1FAIpQLSf9PSEMkxdlYQnAmDcXvsqeeXe-O1kRECZopG9nmwfn_O5qgA/viewform)\n\n* 6-11am UTC - Send us your questions to have them answered live\n* 11am UTC - UGC Session 1\n* 12pm UTC - UGC Session 2\n* 1pm UTC - UGC Session 3\n* 2pm UTC - UGC Session 4\n* 3pm UTC - Join us for dinner\n* 5pm UTC - Join us for Game Night and a Gitter AMA\n\n#### [Tuesday, October 24](https://www.youtube.com/watch?v=LRpkLBWA_sI)\n\n[Vote for the UGC Sessions you want to see on the live stream!](https://docs.google.com/forms/d/e/1FAIpQLSf9PSEMkxdlYQnAmDcXvsqeeXe-O1kRECZopG9nmwfn_O5qgA/viewform)\n\n* 6-11am UTC Send us your questions to have them answered live\n* 11am UTC - UGC Session 1\n* 12pm UTC - UGC Session 2\n* 1pm UTC - UGC Session 3\n* 2pm UTC - UGC Session 4\n* 3pm UTC - Join us for dinner\n* 5pm UTC - Join the Toga Party!\n\n## Get involved\n\nWe want to see you there! [Tweet us](https://twitter.com/gitlab) using #GitLabSummit to let us know your questions and comments. 
We'll be giving away limited edition swag to people who chime in and ask questions on social, and we'll also poll you to ask which UGC sessions you want live streamed. We're so excited to share the Summit with our community for the first time, and we hope you'll join us!\n\nRead more about our company values in our [open source](/blog/our-handbook-is-open-source-heres-why/) [handbook](https://handbook.gitlab.com/handbook/values/), licensed by [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/).\n",[832,9],{"slug":6626,"featured":6,"template":680},"watch-the-gitlab-summit-from-your-desk","content:en-us:blog:watch-the-gitlab-summit-from-your-desk.yml","Watch The Gitlab Summit From Your Desk","en-us/blog/watch-the-gitlab-summit-from-your-desk.yml","en-us/blog/watch-the-gitlab-summit-from-your-desk",{"_path":6632,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6633,"content":6638,"config":6643,"_id":6645,"_type":14,"title":6646,"_source":16,"_file":6647,"_stem":6648,"_extension":19},"/en-us/blog/welcome-to-gitlab-unfiltered",{"title":6634,"description":6635,"ogTitle":6634,"ogDescription":6635,"noIndex":6,"ogImage":690,"ogUrl":6636,"ogSiteName":667,"ogType":668,"canonicalUrls":6636,"schema":6637},"Welcome to the home of GitLab Unfiltered","The GitLab Unfiltered blog is user-generated content by the GitLab team.","https://about.gitlab.com/blog/welcome-to-gitlab-unfiltered","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Welcome to the home of GitLab Unfiltered\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2019-08-20\",\n      }",{"title":6634,"description":6635,"authors":6639,"heroImage":690,"date":6640,"body":6641,"category":808,"tags":6642},[2353],"2019-08-20","\n\nIn the spirit of [transparency](https://handbook.gitlab.com/handbook/values/#transparency) and \"[everyone can 
contribute](/company/mission/#mission),\" the GitLab Unfiltered blog is user-generated content by the GitLab team.\n\nAny GitLab team member is free to publish to the Unfiltered blog, provided that they have a peer review their post first.\nPlease read the [GitLab Unfiltered handbook](/handbook/marketing/blog/unfiltered/) to find out how to contribute.\n\nWatch this space!\n",[9],{"slug":6644,"featured":6,"template":680},"welcome-to-gitlab-unfiltered","content:en-us:blog:welcome-to-gitlab-unfiltered.yml","Welcome To Gitlab Unfiltered","en-us/blog/welcome-to-gitlab-unfiltered.yml","en-us/blog/welcome-to-gitlab-unfiltered",{"_path":6650,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6651,"content":6657,"config":6662,"_id":6664,"_type":14,"title":6665,"_source":16,"_file":6666,"_stem":6667,"_extension":19},"/en-us/blog/welcome-to-the-devops-platform-era",{"title":6652,"description":6653,"ogTitle":6652,"ogDescription":6653,"noIndex":6,"ogImage":6654,"ogUrl":6655,"ogSiteName":667,"ogType":668,"canonicalUrls":6655,"schema":6656},"Welcome to the DevOps Platform era","GitLab CEO Sid Sijbrandij reflects on the evolution of DevOps and the emergence of the DevOps Platform as the solution for businesses wanting to deliver software faster, more securely, and at a lower cost.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668101/Blog/Hero%20Images/dop_cover.png","https://about.gitlab.com/blog/welcome-to-the-devops-platform-era","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Welcome to the DevOps Platform era\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-08-03\",\n      }",{"title":6652,"description":6653,"authors":6658,"heroImage":6654,"date":6659,"body":6660,"category":1340,"tags":6661},[762],"2021-08-03","\nDevOps has evolved since its infancy, over a decade ago. 
Swiss developmental psychologist Jean Piaget believed human cognitive development has [four stages](https://www.healthline.com/health/piaget-stages-of-development) (sensorimotor, preoperational, concrete operational, and formal operational). Through each of these stages, the human mind obtains new knowledge while building and modifying memories to inform one's understanding of the world around them.\n\nIn the same way that people go through stages as they grow, markets and industries also go through stages of development. Over the years, DevOps has grown into a mature, business-critical practice.\n\nAs the DevOps industry expanded, so did the number and complexity of tool-project integrations within an organization. This was the result of three developments in DevOps:\n\n1. Companies moved from monolithic architectures to [microservices architectures](/topics/microservices/). By doing so, applications could scale independently, allowing teams to move faster.\n2. The faster delivery of software also required companies to use more DevOps tools per project.\n3. The linear growth of both more projects and more tools per project led to an exponential increase in the number of project-tool integrations.\n\nThis increase in project-tool integrations called for a change in the way organizations adopted DevOps tools. At GitLab, we identified four phases of evolution in the adoption of DevOps tools over time.\n\n## Phase 1 - Siloed DevOps\n\nIn this early phase, each department or team built or purchased their own tools in isolation, which they optimized for their own narrow objectives, without explicitly coordinating with others. This led to a \"Siloed DevOps\" environment that caused problems when teams tried to work together because they were not familiar with the tools of the other teams. It is common for organizations at this level of maturity to have multiple duplicative sets of tooling for common DevOps functions like planning, source code management, and CI/CD. 
The chaotic environment slows down collaboration and knowledge sharing or stops it altogether.\n\n## Phase 2 - Fragmented DevOps\n\nThe need for less chaos and more harmony drove organizations to the second phase, Fragmented DevOps. In this phase, organizations standardized on the same set of tools across the organization. Typically, there was one preferred tool for each stage of the DevOps lifecycle. Teams within the same function could collaborate better, but the tools were not connected between stages. As an example, planning was standardized and deployment was standardized, but each stage was still siloed from each other. It was hard to move through the DevOps lifecycle.\n\n## Phase 3 - DIY DevOps\n\nOrganizations that tried to remedy this by manually integrating their DevOps point solutions together reached the third phase, \"DIY DevOps\". Unfortunately – as many DIYers will know all too well – when you try to put together many different parts that were never designed to work with each other, the end results never fit quite right. In the same way, homegrown toolchains create complex workflows that slow down the development process — and overall cycle time. For many organizations, maintaining DIY DevOps toolchains requires significant effort, resulting in higher costs, slower cycle times, and opportunities for vulnerabilities to be targeted.\n\n## Phase 4 - The DevOps Platform era\n\nThe true potential of DevOps was not fully realized in the first three phases. That's why I am proud that GitLab is the leader in enabling the fourth phase, the DevOps Platform era. [The DevOps Platform](/topics/devops-platform/) is a single application with one user interface and a unified data store. It includes every stage of the DevOps lifecycle and brings together development, operations, and security teams. It allows these groups to collaboratively plan, build, secure, and deploy software. 
As a result, this improves businesses' velocity, efficiency, and security, allowing them to deliver software faster and at a lower cost.\n\n## The future of DevOps\n\nWhen I think about the future of DevOps, three things stand out. First, I believe that a platform solution with embedded security _is_ the future. Security that is built-in, not bolt-on, is needed to secure a software supply chain from end-to-end without sacrificing speed for security.\n\nFor example, the world's most trusted hacker-powered security company, HackerOne, is using The DevOps Platform. With GitLab, they've been able to replace their DIY toolchain and shift security left. HackerOne is now catching security flaws early and getting immediate feedback since security is built into the developer's workflow.\n\nIn May, the U.S. government [issued a new policy](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/) aimed at securing both the private and public sector software supply chains against malicious cyberattacks. Now is the time to make security a fundamental part of your DevOps journey. In today's landscape, you need to secure 100% of your applications every time they get updated. The only practical way to do that is to integrate security into the platform.\n\nSecond, I believe that machine learning will be critical in making the DevOps workflow faster. In the [GitLab 2021 DevSecOps survey](/developer-survey/), 75% of respondents reported that their DevOps teams are using or planning to use machine learning or AI for testing and code review. In June, [GitLab announced the acquisition](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html) of a machine learning-based solution called UnReview. This acquisition and continued machine learning integration will automate workflows and compress the DevOps cycle time. 
GitLab is focused on using machine learning to reduce friction in your work, so you can spend more time innovating.\n\nThird, I believe DevOps platform adoption will accelerate. [Gartner predicts that by 2023](/press/releases/2020-12-09-gitlab-cited-as-representative-vendor-in-gartner-market-guide.html), 40% of organizations will have switched from multiple point solutions to a platform in order to streamline application delivery. Gartner's prediction is an increase from the base of 10% or less using a DevOps Platform in 2020. GitLab customers often tell us that DIY toolchains are too complicated. If you're feeling that way too, it's time to choose a path to simplicity. The fastest way to get there is with the DevOps Platform.\n\nYou don't need to rip and replace to get started. Many customers began their GitLab journey with Source Code Management and CI. When they were ready, GitLab helped them to replace the rest of their DIY DevOps. When _you're_ ready, GitLab will work with you and GitLab's partner ecosystem to help you achieve your DevOps objectives on your schedule.\n\nJust like human cognitive development, DevOps has evolved thanks to combined experiences and new knowledge as it became available. I'm grateful to the innovators before us with the same goal: To make DevOps more efficient and collaborative.\n\n## Join us at GitLab Virtual Commit\n\nWant more DevOps? Tune in virtually at [GitLab Commit August 3-4, 2021](/events/commit/). 
Watch a video of the keynote address this blog post is based on:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/582282482\" width=\"640\" height=\"360\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C!-- blank line -->\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [It's time to build more accessible software. A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n",[1440,675,9],{"slug":6663,"featured":6,"template":680},"welcome-to-the-devops-platform-era","content:en-us:blog:welcome-to-the-devops-platform-era.yml","Welcome To The Devops Platform Era","en-us/blog/welcome-to-the-devops-platform-era.yml","en-us/blog/welcome-to-the-devops-platform-era",{"_path":6669,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6670,"content":6676,"config":6681,"_id":6683,"_type":14,"title":6684,"_source":16,"_file":6685,"_stem":6686,"_extension":19},"/en-us/blog/what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry",{"title":6671,"description":6672,"ogTitle":6671,"ogDescription":6672,"noIndex":6,"ogImage":6673,"ogUrl":6674,"ogSiteName":667,"ogType":668,"canonicalUrls":6674,"schema":6675},"How diversity, inclusion, and belonging looks in the tech industry","The tech industry is predominantly white and male, which has historically made it challenging for underrepresented minorities to gain a foothold in 
leadership.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681364/Blog/Hero%20Images/dib-mit.png","https://about.gitlab.com/blog/what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How diversity, inclusion, and belonging looks in the tech industry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-06-17\",\n      }",{"title":6671,"description":6672,"authors":6677,"heroImage":6673,"date":6678,"body":6679,"category":299,"tags":6680},[672],"2020-06-17","\n\n_This is the second in our three-part series on diversity, inclusion and belonging. [Part one](/blog/our-journey-to-a-diverse-and-inclusive-workplace/) focuses on GitLab's goals and efforts to date._\n\nIt’s no secret that the tech industry has stumbled, struggled, and in some cases outright failed when it comes to building inclusive workplaces where women, people of color, members of the LGBTQ community, and other underrepresented groups are adequately represented. And how can they, when the majority of people working in and leading tech companies are white men?\n\nIn 2014 several tech companies including Apple, Facebook, and Microsoft, released their first diversity reports, which showed what many people already knew: [The majority of tech workers are white and male](https://www.wired.com/story/five-years-tech-diversity-reports-little-progress/).\n\n\"Most times when you are forming a company people tend to gather those that are a lot like them whether it be race, occupation, gender, etc. As a result you take a look at your company start-up just to realize what you have formed is not as diverse as you would like,\" explains [Candace Byrdsong Williams](/company/team/#cwilliams3), diversity, inclusion, and belonging (DIB) partner at GitLab. 
\"That’s what tends to happen in a lot of companies. You’re in prep and formation mode and you are thinking how can I make the best product, not how can I gather the best people.\"\n\nForced to work backwards, the CEOs of the aforementioned tech giants vowed to make diversity a top priority. For five years, millions of dollars were spent on diversity initiatives, but research suggests that dollars have done little to help move the dial forward.\n\nIn 2019 separate assessments by Wired Magazine and [TechCrunch](https://techcrunch.com/2019/06/17/the-future-of-diversity-and-inclusion-in-tech/) showed that despite all the lip service paid to diversity, there have been little gains for underrepresented groups at major tech companies. Combined, Black and Latinx employees represented just 3% to 5% of employees at the 23 highest-grossing tech companies, according to a 2016 analysis by the [Kapor Center for Social Impact](https://www.kaporcenter.org/wp-content/uploads/2017/08/TechLeavers2017.pdf).\n\nIt is not just big tech companies that have struggled to build a more diverse workforce and a more inclusive workplace – the tech industry as a whole (including GitLab) has the opportunity to grow and do better.\n\nWe’ve been recognized as the [top company for diversity](/blog/comparably-awards-gitlab-top-culture-diversity-awards/), but our leadership team has no Black or Latinx people at director-or-above levels. Learn how we aim to [accelerate hiring and promotions for Black team members by 2021](/blog/our-journey-to-a-diverse-and-inclusive-workplace/).\n\nIn far too many companies, both in the tech industry and outside of it, diversity and inclusion endeavors are built on good intentions, but rarely are those intentions held to the rigorous standards of other important business targets.\n\n## Diversity has business value\n\nThe main question here is why, when homogeneity in the workforce has been shown to be a losing strategy? 
There is plenty of anecdotal evidence and [empirical research from Harvard Business Review](https://hbr.org/2016/11/why-diverse-teams-are-smarter) that shows [heterogeneous teams bring gains while homogeneous teams create costs](https://hbr.org/2018/07/the-other-diversity-dividend).\n\nIn a conversation between [Marcus Carter](/company/team/#recruiter), senior sales recruiter at GitLab, and [Ryan O’Nell](/company/team/#ronell), VP of commercial sales at GitLab, Ryan said that his commitment to diversity comes from past experience that shows diverse teams are simply better at problem-solving than homogeneous teams. Hard evidence supports this assertion: Heterogeneous teams focus on facts, they process those facts more carefully and create more innovative solutions, writes David Rock and Heidi Grant of the [Neuroleadership Institute](http://www.neuroleadership.org/) in the Harvard Business Review (HBR).\n\nIt seems simple: Diverse teams drive better business results, so hire more diverse team members – but to focus solely on recruitment is short-sighted.\n\nA common misstep many companies make is looking at recruiting people of color the same way that colleges do, says [Sharif Bennett](/company/team/#SharifATL), mid-market account executive at GitLab, and the co-lead of the [Minorities in Tech (MIT) Team Member Resource Group (TMRG)](/company/culture/inclusion/erg-minorities-in-tech/). The typical workplace doesn’t have the luxury of a ton of new people coming in each year, which means reducing attrition of underrepresented minorities is vital. The best way to do that? Create opportunities for advancement and build inclusive work cultures.\n\n\"Just as important as recruitment is retention,\" says Sharif at the DIB roundtable during GitLab Virtual Contribute 2020. \"Are we creating a safe space? An environment they are connected to, where they are feeling like their work is valued? 
You’re going to see recruits leave if persons of color aren’t feeling valued.\"\n\nResearch indicates that the most effective diversity and inclusion strategy is one that focuses as much, if not more, on creating an inclusive work environment with opportunities for belonging and advancement.\n\nIn an [interview with HBR](https://hbr.org/2019/11/the-day-to-day-work-of-diversity-and-inclusion), [Dr. Melissa Thomas-Hunt](https://www.linkedin.com/in/melissa-thomas-hunt-2843196), head of global diversity and belonging at Airbnb, advises companies to regularly assess their performance when it comes to DIB, and set diversity and inclusion-specific metrics while also creating real opportunities for mentorship and pathways to advancement for underrepresented minorities, in particular Black employees. Otherwise, many Black employees and other underrepresented minorities will leave for new opportunities.\n\n\"Black employees need to enter generative work environments — ones that allow all people to grow, develop, and flourish, and ones that signal they are valued. Without these, there will be a revolving door of Black talent who arrive excited, energized, and ready to contribute and leave feeling unseen and demoralized,\" says Dr. Hunt to HBR.\n\n## Unfairness pushes too many underrepresented minorities out of tech\n\nSix years after the first round of diversity commitments, big tech companies such as Apple, Facebook and Twitter are pledging to do more to promote diversity and inclusion, and are donating millions to social justice initiatives and organizations supporting the Black community after public outcry following the killing of George Floyd, according to an [article from CNBC](https://www.cnbc.com/2020/06/12/six-years-into-diversity-reports-big-tech-has-made-little-progress.html). 
However, despite these donations and commitments, the article said progress has been slow with increasing representation of Black professionals in leadership and technical roles in many major tech companies, which continue to be led predominantly by white men.\n\nThe CNBC article analyzed employee demographic data by big tech companies and found only marginal increases in the number of Black employees at many major companies. In companies that have bigger increases in the number of Black employees, those employees are often being hired for non-technical roles in distribution centers and for support roles, which are lower-paying than technical roles. Although hiring of Black professionals and people of color may increase, there are typically higher rates of turnover among people of color than other non-minority groups, says Margaret Neale, a Stanford professor quoted by CNBC.\n\nAttrition of underrepresented minorities is certainly not unique to these big tech companies. The findings of the Tech Leavers Study shows that attrition of underrepresented minorities is a significant problem in the tech industry at-large.\n\nThe Tech Leavers Study investigated who is voluntarily leaving the tech industry and why. The findings show that underrepresented minorities are more likely than non-minority groups to voluntarily leave their jobs in the tech industry. The number one reason? **Unfairness**.\n\nUnfairness or mistreatment was the number one driver of turnover among those surveyed – a problem that costs the tech industry an estimated $16 billion annually, according to the Tech Leavers Study. Of all the underrepresented groups surveyed by the Kapor Center, men of color were the most likely to leave their job in tech due to unfairness (40%). The experiences of unfairness differed across groups, but one-quarter of underrepresented men and women of color said they experienced stereotyping in the workplace.\n\nThe antidote to unfairness? 
Fairness, and an effective diversity and inclusion strategy. In the Tech Leavers Study, 62% of respondents say they would have stayed at their company if the company had taken steps to create a positive and respectful work environment, and 57% of respondents would have stayed if the company had taken steps to create a fair and inclusive work culture.\n\n## Building an effective DIB strategy\n\nThe Tech Leavers Study identified five common D&I initiatives that help reduce incidents of mistreatment in the workplace. When all five initiatives are in place, there is a large reduction in experiences of unfairness, according to the study. All five strategies are implemented at GitLab, where [DIB](https://handbook.gitlab.com/handbook/values/#diversity-inclusion) is one of our core values. It is through the efforts of Candace, the leads of our five TMRGs, and the company at large that we’ve made some considerable progress.\n\n1. [Hire a D&I director](/company/team/#cwilliams3)\n2. [Set explicit diversity goals](/company/culture/inclusion/#performance-indicators)\n3. [Pay bonuses for employee referrals for employees from underrepresented backgrounds](/handbook/incentives/#referral-bonuses)\n4. [Conduct unconscious bias training](https://handbook.gitlab.com/handbook/values/#unconscious-bias)\n5. 
[Establish Team Member Resource Groups (TMRGs)](/company/culture/inclusion/#ergs)",[9],{"slug":6682,"featured":6,"template":680},"what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry","content:en-us:blog:what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry.yml","What Diversity Inclusion And Belonging Looks Like In The Tech Industry","en-us/blog/what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry.yml","en-us/blog/what-diversity-inclusion-and-belonging-looks-like-in-the-tech-industry",{"_path":6688,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6689,"content":6694,"config":6700,"_id":6702,"_type":14,"title":6703,"_source":16,"_file":6704,"_stem":6705,"_extension":19},"/en-us/blog/what-i-learned-about-our-ceo-s-job-from-participating-in-the-ceo-shadow-program",{"title":6690,"description":6691,"ogTitle":6690,"ogDescription":6691,"noIndex":6,"ogImage":5117,"ogUrl":6692,"ogSiteName":667,"ogType":668,"canonicalUrls":6692,"schema":6693},"What I Learned about the CEO's Job from Participating in GitLab's CEO Shadow Program","GitLab's CEO Shadow program gives team members insight into what the CEO does","https://about.gitlab.com/blog/what-i-learned-about-our-ceo-s-job-from-participating-in-the-ceo-shadow-program","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What I Learned about the CEO's Job from Participating in GitLab's CEO Shadow Program\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilie Schario\"}],\n        \"datePublished\": \"2019-10-07\",\n      }",{"title":6690,"description":6691,"authors":6695,"heroImage":5117,"date":6697,"body":6698,"category":698,"tags":6699},[6696],"Emilie Schario","2019-10-07","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nOver the first two weeks in August, I got the chance to participate in GitLab's [CEO Shadow program](/handbook/ceo/shadow/#rotation-schedule), during which 
Shadows attend upwards of 90% of the CEO's meetings over the course of their 2-week rotation.\nWe attend job interviews, 1-on-1s that the CEO has with his direct reports, investor calls, and everything else that comes up.\nDuring my two week rotation, I was asked to drop off calls twice and asked to not join a 1:1 once.\nShadows are welcome to join any and all meetings, except for where the guest requests otherwise.\n\nWhile I had seen Sid's calendar leading up to my Shadow rotation, I really did not know what to expect.\nI had chatted with other [Shadow alumni](/handbook/ceo/shadow/#ceo-shadow-program-alumni-and-learnings), but their words of advice did not convey exactly how to prepare.\nI knew I wanted to turn the whole experience into a learning opportunity where I could sponge up every interaction.\nI knew there would be a lot going on.\nI planned to take it all in, then leave room for synthesis later.\n\nNow, as I reflect on my notes, I think the biggest clarity I've gotten is on *what the CEO's job is* and how that is reflected in Sid's day-to-day.\nFrom what I saw, no two days are alike - the weeks even less so - but when I sat back there were three obvious themes that emerged: making big decisions, reinforcing our values, and stepping in wherever there is a leadership gap.\n\n## Making Big Decisions\n\nAt GitLab, we believe in having a [DRI](/handbook/people-group/directly-responsible-individuals/) - Directly Responsible Individual - for most decisions.\nFor example, product managers are DRIs for their areas of the product. Having DRIs is an incredible aspect of GitLab, as it helps keep [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) from devolving into decisions by consensus and helps empower us to work asynchronously.\n\nIf there's a moment where a decision needs to be made, a DRI can make a decision.\nIf another piece of information comes up, we can make another decision. 
Rarely are decisions [one-way door decisions](https://handbook.gitlab.com/handbook/values/#make-two-way-door-decisions).\n[Making decisions quickly](/handbook/leadership/mitigating-concerns/#loss-of-velocity) is key to how [we can ship](/releases/categories/releases/) as much as we do, [iterating](https://handbook.gitlab.com/handbook/values/#iteration) along the way.\n\nI think it probably seems obvious that a CEO makes decisions all the time, but given the way we set DRIs for things, I was curious what sorts of decisions I could see Sid making, and I wanted to understand *why those decisions* were being escalated to Sid.\nMy cool discovery is that he was doing the things described in his [job family](https://handbook.gitlab.com/job-families/chief-executive-officer/).\nThe second requirement of the CEO's job description is\n> Hire great people. Help people that are not a good fit find another job.\n\nFor many leadership roles, Sid is still involved in the hiring decision.\nSince [lowering the hiring bar is one of our biggest concerns](/handbook/leadership/mitigating-concerns/#lowering-the-hiring-bar), it makes sense that this is an area where the CEO would spend his time.\nSid makes [pricing decisions](https://gitlab.com/gitlab-com/Product/blob/master/.gitlab/issue_templates/Move-A-Feature-Down.md#L25) because pricing is the CEO's job.\nSid makes the decisions that it's his job to be making.\nThere's no secret process behind the curtain.\n\n## Reinforcing our GitLab Values\n\nEvery interaction with Sid is an opportunity for him to reinforce the company values.\nWhat may look to some like *handbook policing* is simply him pushing us to continue to [write things down](https://handbook.gitlab.com/handbook/values/#write-things-down).\n*An obligatory breakout call* is an opportunity to [get to know each other](https://handbook.gitlab.com/handbook/values/#get-to-know-each-other).\nAnd *thinking too small* is just [scoping to the minimum viable 
change](https://handbook.gitlab.com/handbook/values/#move-fast-by-shipping-the-minimum-viable-change).\n\nHalf way through my first week, I was lucky enough to attend [TractionConf](https://www.tractionconf.io/) with Sid.\nThere he would be participating in a Fireside Chat with Frederic Lardinois of TechCrunch to talk about GitLab's origin story, remote work, our transparency value, and all the things that make GitLab unique.\nGiven that this was the first time I had seen Sid speak in-person at a non-GitLab event, I opened up a Google Doc and started typing.\nI didn't have a sense of what it'd be for or how it'd be useful.\nI figured better to have the notes than not and just went for it.\n\nTwo things happened that I could not have expected.\nFirst, because I shared the doc in our #ceo channel in Slack, people helped me clean it up as I was typing notes.\nI didn't have to worry about that `teh` that I mistyped because team members who were reading my notes as the talk was happening were helping me make it better.\nSecond, when someone tweeted about the session later on, Sid suggested I reply with my notes.\nWhat may look to some as *pieced together notes* is really just accepting that [everything is in draft](https://handbook.gitlab.com/handbook/values/#everything-is-in-draft) and working with a [low level of shame](https://handbook.gitlab.com/handbook/values/#low-level-of-shame).\nNow those notes can serve not just me, but anyone who wants to read about the session.\nThey're not perfect, but they are better than nothing.\nLearning to work with a low level of shame is hard - probably one of the hardest transitions about working at GitLab! - but it makes everyone's work experience better.\n\n{::options parse_block_html=\"true\" /}\n\u003Cblockquote class=\"twitter-tweet\">\n\u003Cp lang=\"en\" dir=\"ltr\">I really enjoyed the talk too! 
Here are my notes from the session, in case they&#39;re useful \u003Ca href=\"https://twitter.com/hashtag/TractionConf?src=hash&amp;ref_src=twsrc%5Etfw\">#TractionConf\u003C/a> \u003Ca href=\"https://t.co/b55bQITbNv\">https://t.co/b55bQITbNv\u003C/a>\u003C/p>&mdash; Emilie Schario (@emilieschario) \u003Ca href=\"https://twitter.com/emilieschario/status/1159667866918109190?ref_src=twsrc%5Etfw\">August 9, 2019\u003C/a>\n\u003C/blockquote>\n\nBy working with a low level of shame, I made it so that everyone could contribute!\n\nWhile I like to think I'm good at working *in the GitLab way* and encourage my peers to do the same, I never go out of my way to push people to work even more-so.\nI saw most interactions of Sid's have some aspect of stewarding our values.\nWhether he was coaching his direct reports on [how we organize the handbook](/handbook/handbook-usage/#style-guide-and-information-architecture), suggesting someone stop sharing their screen in a [Group Conversation](/handbook/group-conversations/) so that it feels more like a conversation, or pointing out that a section name is not [MECEFU](/handbook/communication/#mecefu-terms), Sid was regularly stewarding our values.\n\n## Stepping in as a Leader\n\nI saw this most obviously when I first started at GitLab before we had a CMO.\nSid was acting-CMO, so the many ways he was stepping in to run marketing was really visible in the company - most obviously in running the Group Conversations for Marketing.\n\nWe see this behavior in most managers.\nWhen a team member is on vacation or leave, their managers may step in to fill the gap.\nSid does the same for his direct reports.\nThis is most visible today in how we use [PTO Ninja](/handbook/paid-time-off/#pto-ninja).\n\nLots of CEO Shadows have walked away with [different takeaways](/handbook/ceo/shadow/#ceo-shadow-program-alumni-and-learnings).\nThe biggest understanding I walked away with was this: while Sid's job is to make decisions, steward our values, and 
step in when needed, it's actually expected of all of us too!\nWe live GitLab's values by having DRIs, helping steward our culture in small group settings, and fostering collaboration.\nIf you're a GitLab team member, go for the CEO Shadow program, if given the chance.\nIf you're thinking about rolling out a similar program at your company, I hope our details on [how the shadow program works at GitLab can be your blueprint](/handbook/ceo/shadow/).\n",[810,9,873],{"slug":6701,"featured":6,"template":680},"what-i-learned-about-our-ceo-s-job-from-participating-in-the-ceo-shadow-program","content:en-us:blog:what-i-learned-about-our-ceo-s-job-from-participating-in-the-ceo-shadow-program.yml","What I Learned About Our Ceo S Job From Participating In The Ceo Shadow Program","en-us/blog/what-i-learned-about-our-ceo-s-job-from-participating-in-the-ceo-shadow-program.yml","en-us/blog/what-i-learned-about-our-ceo-s-job-from-participating-in-the-ceo-shadow-program",{"_path":6707,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6708,"content":6714,"config":6720,"_id":6722,"_type":14,"title":6723,"_source":16,"_file":6724,"_stem":6725,"_extension":19},"/en-us/blog/what-is-a-vp-of-scaling",{"title":6709,"description":6710,"ogTitle":6709,"ogDescription":6710,"noIndex":6,"ogImage":6711,"ogUrl":6712,"ogSiteName":667,"ogType":668,"canonicalUrls":6712,"schema":6713},"VP of Scaling: What it is and how it works at GitLab","At GitLab we introduced the role of VP of Scaling early on. 
But what does that role mean and how has it worked at GitLab?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680369/Blog/Hero%20Images/vp-of-scaling.jpg","https://about.gitlab.com/blog/what-is-a-vp-of-scaling","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"VP of Scaling: What it is and how it works at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ernst van Nierop\"}],\n        \"datePublished\": \"2017-09-08\",\n      }",{"title":6709,"description":6710,"authors":6715,"heroImage":6711,"date":6717,"body":6718,"category":299,"tags":6719},[6716],"Ernst van Nierop","2017-09-08","\n\nFast-growing companies sometimes need leadership in new initiatives before there's time to hire a team member dedicated to them. This is how we tackled this challenge.\n\n\u003C!-- more -->\n\nIn the last two years GitLab has grown from about 15 people in a handful of countries to now well [over 180 people in more than 30 countries](/company/team/). In a company that is growing as fast as GitLab is, there is always some team that needs to be built, or some team to be temporarily led while a leader for the longer term is found, or some initiative to be started that doesn't (yet) fit within existing teams or departments. We can – and do – add people to the\nGitLab team to tackle these challenges. But hiring takes time and isn't always\nappropriate for a one-off or early-stage initiative. GitLab is also a fully remote and international organization that moves fast, and we can't afford to wait for these challenges to sit idle.\n\n## So who should build that team, be that interim leader, or start that initiative?\n\nAt GitLab, we've addressed this with\nthe role of VP of Scaling. The\nword \"scaling\" in this case relates to the _organization_ instead of, for example, sales or user-base. 
Think of the VP of Scaling as a full-time interim\nmanager rotating between vastly different functions, building teams and\nscalable processes. The job is to \"get in\" and to figure out how to \"get out\"\nresponsibly. (As an aside: at first we struggled to come up with a good name for this role and considered everything from janitor/plumber (sweeping/connecting the entire company – vetoed), to\n[Mr. Wolf](http://www.indiewire.com/2012/03/being-winston-wolfe-9-reasons-why-pulp-fiction-is-the-management-guide-every-indie-filmmaker-needs-48445/)\n(fixes problems on demand – too negative), until eventually settling on the\nkey word of \"scaling.\")\n\n## What does the role involve?\n\nA VP of Scaling should be broadly deployable in the company and go where the\nchallenges are. For us, the first task at hand was to scale up our team, starting with our ability to recruit and hire quickly and efficiently. And so it was that I began in the role of Interim Head of People Operations;\nfrom sending out employment agreements and setting up a candidate tracking\nsystem, to laying the groundwork for our\n[hiring process](/handbook/hiring/), building the\nbeginnings of the People Operations team, and developing the first iteration of the [global compensation calculator](/handbook/total-rewards/compensation/). Once the People Operations team was left in more experienced hands I moved on to help as (interim) Support Lead, followed more recently by time as interim Director of Infrastructure, and currently interim Director of Security.\n\nWith each of the teams that I've worked with, the challenges they've faced are a direct result of the success of the company. The Support team \"feels\" it through more customer tickets, and the Infrastructure team \"feels\" the increased usage of GitLab.com. 
Although no two teams are identical, there are some common approaches that I have found to be helpful in an interim leadership role.\n\n> Perhaps the most important point is to listen to the team – and to never stop asking questions.\n\nPerhaps the most important point is to listen to the team – and to never stop asking questions. The individuals in our team are smart, they have domain\nexpertise, and they often have great ideas on what needs to be done in order to be successful as a team. Regarding the \"never stop asking questions\" part, well, I think I've had that bit covered ever since I had the ability to talk.\n\nComing onboard with a new team, I listen to the concerns and ideas from the team and from the management chain that they report into, and sort the challenges into those that need to be addressed _right now_ (e.g. add more people to the team through hiring or borrowing; unblock a decision on topic X) from those that need to be addressed on a longer timescale. Once the immediate needs are taken care of, with the help of the team and sometimes outside experts we start sketching out what Utopia looks like for this team. What does the team, and the service the team provides, look like in a world where GitLab is 10x more popular? How about 100x?\n\n>Once the immediate needs are taken care of, with the help of the team and sometimes outside experts we start sketching out what Utopia looks like for this team.\n\nFor example, the Support Team faces the dual challenge of a growing _customer\nbase_ as well as a growing _product_ in terms of product scope and capabilities – straining the team. The \"right now\" solution involved adding\n[support turbos](/handbook/support/#additional-resources-for-the-support-team) and hiring people in multiple timezones to spread the customer ticket load evenly. To make it _scalable_ beyond the immediate needs is part of the Utopia for any team. 
In this case, our Support Engineers iterated quickly with the new hires to enable a mostly self-guided onboarding process as well as self-guided pathways for [continuous learning](/handbook/support/advanced-topics/).\n\nJumping from team to team in an interim role also provides for a great\nopportunity to help spread best practices from team to team, and to erase or\nmanage \"interfaces\" between teams. For example, the Support Team feels the\n_customer's_ sense of urgency around needing bug fixes or feature development,\nbut did not have a great way to effectively communicate that sense of urgency\nto the rest of the team without just making a lot of noise. So the team came up with a quantitative metric using\n[issue priority labels](/handbook/support/workflows/working-with-issues.html#adding-labels), with good success. When we noticed that the Infrastructure team – as the largest \"customer\" of GitLab Enterprise Edition – was having similar escalation problems, it was easy to adopt priority labels for [security](/handbook/security/#security-priority-labels) as well as [availability and performance](/handbook/engineering/performance/#performance-labels).\n\n## What are the challenges of the role?\n\nA key challenge (and attraction) of this role is that I need to get up to speed quickly on areas of the company and product in which I do not have much prior experience. I rely on the kindness and the expertise of the team, and benefit a lot from our dedication to documenting everything (which we do as an integral part of being successful in a remote-only setting). 
Of course I contribute back to this documentation as well: as we worked on reducing the latency of GitLab.com, I found myself wondering, \"What actually happens when a user enters a GitLab.com URL in their browser?\" and then documented the answer(s) on our handbook page about\n[GitLab.com performance](/handbook/engineering/performance).\nAnother challenge is, unsurprisingly, that I get somewhat attached to the teams that I'm actively working with. I enjoy learning from them, I enjoy working with and enabling them, and I enjoy getting to know the people behind the GitLab handle.\nIt can be difficult to _fully_ move on to the next assignment, with a few pending issues tenaciously hanging on to my todo list for way too long.\n\nDespite the odd job title and the fluid nature of the job itself, I like to think that it has worked well for us here at GitLab. Do you have a similar role at your company? We'd love to hear about it!\n\n[Cover image](https://unsplash.com/@ripato?photo=tpg_oEPzajA) by [Ricardo Gomez Angel](https://unsplash.com/@ripato) on Unsplash\n{: .note}\n",[9,810],{"slug":6721,"featured":6,"template":680},"what-is-a-vp-of-scaling","content:en-us:blog:what-is-a-vp-of-scaling.yml","What Is A Vp Of Scaling","en-us/blog/what-is-a-vp-of-scaling.yml","en-us/blog/what-is-a-vp-of-scaling",{"_path":6727,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6728,"content":6734,"config":6740,"_id":6742,"_type":14,"title":6743,"_source":16,"_file":6744,"_stem":6745,"_extension":19},"/en-us/blog/what-its-like-to-intern-in-gitlab-security",{"title":6729,"description":6730,"ogTitle":6729,"ogDescription":6730,"noIndex":6,"ogImage":6731,"ogUrl":6732,"ogSiteName":667,"ogType":668,"canonicalUrls":6732,"schema":6733},"What it's like to intern on the GitLab Security team","I spent 16 weeks interning across the GitLab security department and here’s what I 
learned","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672427/Blog/Hero%20Images/cgower_desk.jpg","https://about.gitlab.com/blog/what-its-like-to-intern-in-gitlab-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What it's like to intern on the GitLab Security team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Juliet Wanjohi\"}],\n        \"datePublished\": \"2020-08-13\",\n      }",{"title":6729,"description":6730,"authors":6735,"heroImage":6731,"date":6737,"body":6738,"category":698,"tags":6739},[6736],"Juliet Wanjohi","2020-08-13","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nBetween May and August 2020, I had the wonderful opportunity of being part of the [Engineering Internship Pilot Program](/handbook/engineering/internships/). Specifically, I was an intern in the [Security department](/handbook/security/) at GitLab. This was my first [all-remote](/company/culture/all-remote/guide/) role, and I must say it was an extremely worthwhile experience. 😄\n\nGetting to work remotely at GitLab offered a lot of flexibility as I could choose my own working hours where I was most productive, and at the same time learn how to become a [manager of one](https://handbook.gitlab.com/handbook/values/#managers-of-one) in my day-to-day tasks. Additionally, due to the team being fully-distributed, I was able to meet and collaborate with a diverse group of individuals from all over the world. The team was very helpful each step of the way, and I could always reach out to my manager and mentors if I required any assistance. 
What surprised me the most was that I was able to have chats with senior leadership in GitLab, which I think is great since one may not have such opportunities in a normal office setup.\n\nThe internship enabled me to grow exponentially in different aspects: technical skills, accountability, and within the [GitLab values](https://handbook.gitlab.com/handbook/values/) of collaboration, efficiency and transparency to name but a few areas. \n\n## Cross-functional exposure and understanding\nA goal for my internship experience was to gain exposure to different security teams and develop an understanding of the key functions performed to ensure and enhance the overall security posture of GitLab. \n \nGitLab’s [Security department](/handbook/security/) is organized around three key tenets that drive the structure and activities of the group, including: secure the product, protect the company and assure the customer. I had the opportunity to work across each of these teams and want to share some key learnings from each rotation.\n\n### Securing the product\nThis team works closely with engineering and product teams to ensure that all GitLab products securely handle the customer data with which we are entrusted. I was able to work with the teams in the [Application Security](/handbook/security/security-engineering/application-security/), [Security Research](/handbook/security/#security-research) and [Security Automation](/handbook/security/security-engineering/automation/) functions to gain a deeper appreciation of how they ensure all aspects of GitLab exposed to customers or that host customer data are held to the highest security standards.\n\nWorking with security engineers on our [Application Security](/handbook/security/security-engineering/application-security/) team, I had the chance to contribute directly to GitLab the product! 
🎉  This involved [improving the current path traversal checks](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/33114) on user controlled file names and file paths. It was a collaborative effort between myself and other engineers through multiple code reviews and iterations that also helped me to sharpen my skills in coding with Ruby and produce well-written tests. Furthermore, I was able to triage a couple of reports in [GitLab’s bug bounty program](https://hackerone.com/gitlab). This enabled me to learn more about vulnerability identification and how the team handles bug reports from the first stage, where a bug is reported, to the last stage, where a security release is created to fix the bug. By reviewing past issues that the Application Security team had handled, I was able to develop a better understanding of the [security fix process](/handbook/security/#vulnerability-reports-and-hackerone). With respect to ‘shift left’, this enabled me to see how the team collaborates with other engineering and product teams to integrate security early in the development process by carrying out code security reviews on features.\n\nIn addition, I had pairing sessions with members of the [Security Research](/handbook/security/security-engineering/security-research/) team where I was able to learn about different bug-hunting approaches and current security vulnerability research areas being undertaken such as SAST/DAST tooling and dependency scanning. We also worked together to solve a couple of challenges from the 2020 GitLab capture the flag (CTF). Read about the CTF in [“How to play GitLab's Capture the Flag at home”](/blog/how-to-play-gitlab-ctf-at-home/) and try your hand!\n\nThe time I spent working with the [Security Automation](/handbook/security/security-engineering/automation/) team exposed me to the SaaS infrastructure that GitLab relies on with a special emphasis on Google Cloud Platform (GCP). 
I collaborated with another security engineer to design and implement automation efforts to assist with the management of anomalous resources in GCP, and further assist with the triage process of the reports on these resources.  Through [coffee chats](/company/culture/all-remote/informal-communication/#coffee-chats) with the rest of this team, I was able to gain an understanding of the current Security Automation initiatives surrounding the building of tools and services geared towards increasing efficiency and assisting other security teams in their work.\n\n### Protect the company\nThis group is responsible for “[shoring up and maintaining the security posture of GitLab.com to ensure enterprise-level security is in place to protect our new and existing customers](/handbook/security/#protect-the-company)” and I was fortunate to work across all three functional areas within this group: [Security Incident Response Team](/handbook/security/#sirt---security-incident-response-team-former-security-operations) (SIRT), [Trust and Safety](/handbook/security/#trust--safety) team and [Red Team](/handbook/security/threat-management/red-team/).\n\nWorking with the SIRT team was exciting as I got to learn how security incidents are managed by shadowing the security engineers on-call. This can be a very time-sensitive and fast-paced operation as incidents need to be handled quickly, but at the same time, precisely to avoid any further escalations. Additionally, I had the privilege to work with the team to help create detection rules using Python; I particularly enjoyed this since one of my favorite aspects of software engineering is coding! This gave me insight into how we can proactively detect threats in our environment and design appropriate response approaches.\n\nThe [Trust and Safety](/handbook/security/#trust--safety) team’s main objective is to ensure that GitLab.com is not abused by malicious users. 
I was able to contribute to this team’s efforts by developing an algorithm that could help to detect [file obfuscation](https://attack.mitre.org/techniques/T1027/), which is a trending abuse methodology used to hide malicious content. This was particularly interesting as we got to leverage the power of machine learning in the security domain. More about this project can be seen further down in this post!\n\nGitLab’s Red Team actively examines the security posture of the organization by carrying out exercises to establish threat models and escalate any security gaps that may be discovered during testing. My time spent on this team gave me the opportunity to get the team members’ perspectives on what it takes to be a ‘Red Teamer’ and how they support GitLab’s value of transparency in their day-to-day work. An interesting project that I was able to contribute to involved research on a machine learning algorithm that can help with secret scanning in GitLab repositories. This proof-of-concept was geared towards reducing the large number of false positives in the current state-of-the-art secret searching tools.\n\n### Assure the customer\nThis sub-department focuses on the mission to [“provide assurance to GitLab customers that any data shared with GitLab will be kept safe and our customer's privacy will be respected”](/handbook/security/security-assurance/) and includes the functions and subteams of [Field Security](/handbook/security/security-assurance/field-security/) and [Security Compliance](/handbook/security/security-assurance/security-compliance/).\n\nInterning within this group was a unique experience for me as I had not yet had the chance to try my hand at a [security analyst](https://handbook.gitlab.com/job-families/security/security-analyst/) role. 
Through this engagement, I gained visibility into how risk and compliance relate to the bigger security picture and became familiar with the various security compliance certifications and their relationship to the internal [GitLab Control Framework](/handbook/security/security-assurance/security-compliance/sec-controls.html). Specifically, I was able to look at the SOC2 industry standard and help to test controls such as data management, with respect to current vendor security review assessments.\n\n## A deeper dive: machine learning in security\n\nAs part of my internship here, I had the opportunity to focus more deeply on an area of specific interest to me: machine learning. GitLab is actively pursuing novel ways of integrating machine learning into its overall security model. Machine learning can offer multiple benefits in security-based use cases including detection of malicious activity and automation of repetitive security tasks. \n\nAs part of the anti-abuse efforts ongoing at GitLab, senior security engineer in Automation, [Melissa Rodriguez](https://gitlab.com/melissar) and I worked on creating an algorithm that could help to detect obfuscation in certain files. This involved research and learning how to build models that could find patterns in text, and using this to correctly classify regular files versus obfuscated files. 
I'm proud to say the algorithm I helped to develop with Melissa is going to be used in the detection of abusive activities such as cryptomining, where attackers tend to obfuscate their mining configurations.\n\nMachine learning is a fast-growing trend that has a myriad of applications in the security space, and it is important to consider how to take advantage of it to improve overall security posture and better protect customers.\n\n## Interested in joining GitLab?\nIf you would like to be a part of this amazing team and get to contribute to the GitLab product while enjoying the perks of all-remote, check out the [career opportunities](/jobs/) page and join our [talent community](https://boards.greenhouse.io/gitlab/jobs/4700367002?gh_src=d865c64f2us). You can also learn more about GitLab’s [culture](/company/culture/) and [values](https://handbook.gitlab.com/handbook/values/) in order to get an understanding of what it might be like to work here!\n\n\nCover image by [Christopher Gower](https://unsplash.com/@cgower) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[720,9,745],{"slug":6741,"featured":6,"template":680},"what-its-like-to-intern-in-gitlab-security","content:en-us:blog:what-its-like-to-intern-in-gitlab-security.yml","What Its Like To Intern In Gitlab Security","en-us/blog/what-its-like-to-intern-in-gitlab-security.yml","en-us/blog/what-its-like-to-intern-in-gitlab-security",{"_path":6747,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6748,"content":6754,"config":6760,"_id":6762,"_type":14,"title":6763,"_source":16,"_file":6764,"_stem":6765,"_extension":19},"/en-us/blog/what-its-like-to-interview-at-gitlab",{"title":6749,"description":6750,"ogTitle":6749,"ogDescription":6750,"noIndex":6,"ogImage":6751,"ogUrl":6752,"ogSiteName":667,"ogType":668,"canonicalUrls":6752,"schema":6753},"What it's like to interview at GitLab: A peek inside the recruitment process","A new GitLab team-member shares her experience of being recruited to GitLab, as well as 
some advice for potential candidates.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680526/Blog/Hero%20Images/interviewing-at-gitlab.jpg","https://about.gitlab.com/blog/what-its-like-to-interview-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What it's like to interview at GitLab: A peek inside the recruitment process\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gosia Ksionek\"}],\n        \"datePublished\": \"2019-03-28\",\n      }",{"title":6749,"description":6750,"authors":6755,"heroImage":6751,"date":6757,"body":6758,"category":808,"tags":6759},[6756],"Gosia Ksionek","2019-03-28","\nWhen [Zsuzsanna](/company/team/#zkovacs) from GitLab approached me on LinkedIn, I was sure I had no shot at getting an engineer's job at this kind of company.\nI decided to give it a try anyway, knowing that I can only gain experience and have nothing to lose.\n\nI have to admit, the whole process made me want to work for Gitlab even more, as each step of the way I could clearly see that company values are not only something written in the handbook, but clear guidelines for every part of the process.\nYou can read all about [GitLab's hiring processes here](/handbook/hiring/interviewing/), but I'll also describe each part of the recruitment process as I experienced it, how it was conducted, and what I can advise future candidates:\n\n### Stage 1: Questionnaire\n\nThe first stage was a questionnaire, with both general questions about education and experience,\nbut also two interesting technical questions with the mysterious instruction: \"Describe in as much detail as you think is appropriate,\"\nwhich allowed me to dive into details but also be concise when I felt I have nothing more to add.\nEven this part was educational and left me with some new knowledge!\n\n**Tip**: Take your time! 
It's not a race, better to get it right.\nWriting is not my forte, it took me over two weeks to write the answers at my own pace.\n\n### Stage 2: Screening call\n\nThe second stage involved talking to one of the GitLab team-members and let me use tools that are adopted among the GitLab team.\nThis first screening call contained general questions about my experience and why I applied.\n\n**Tip**: Read the handbook – not all of course, it's over 2,000 pages – but the general section about the company\nto understand the values and how you see yourself in this kind of environment.\n\n### Stage 3: Technical interview\n\nI was assigned a merge request and asked for a code review.\nDuring the actual interview, we discussed the code review and ways to improve the code.\nLater came time for, in my opinion, the most stressful part of the process: LIVE CODING – every programmer's nightmare.\nSuddenly I wasn't able to hit any proper key on my keyboard ...\nBut I was allowed to check any doubts in Google if needed and we ended the conversation with some time for me to ask questions\nabout GitLab, the process and remote setup.\n\n**Tip**: Don't stress out about live coding.\nAnd plug your laptop into the power source,\nthis interview may last for over an hour and with the video call, it can drain the battery really quickly!\n\n### Stage 4: Manager interview\n\nThe fourth stage was a great opportunity to know more about the team –\nas in GitLab, you are applying to the specific team.\nTalking to the manager was a great chance to ask all the questions I had about the everyday aspects of the job\nand to know who would be my potential teammates.\n\n**Tip**: Be prepared for a variety of questions, both technical and regarding soft skills.\n\n### Final stage\n\nThe last stage was very similar to the previous one, but with the person higher up in the organization.\nI need to say at this stage stress got the better at me – I really wanted it to go well.\n\n**Tip**: Just relax and 
prepare the same way as for the previous step.\n\n### References\n\nAfter all those steps I was asked to provide references.\nI chose a colleague I worked with at two different companies and my former manager.\n\n**Tip**: Think carefully who can provide the most valuable feedback about you. Not the most positive, of course, it doesn't hurt,\nbut also honest. Who knows your good sides and what can you improve.\n\nAnd after all those steps and stages, all I could do is wait for the final decision ...\n\nI can't emphasize enough how transparent the whole process was.\nI was informed at every stage what was ahead of me, I could pick which time worked best for me, and I got results quite quickly every time.\nPlus everyone was so nice, not only to me but also to my references, and this was so important to me\nas I was asking for a favour. I think all of this – making a candidate understand the process,\ntreating them with respect, and making it a nice experience overall, is a great example of acting according\nto the GitLab values in every way – even through the recruitment process.\n\nTL;DR: would apply again!\n\nPhoto by [Piotr Wilk](https://unsplash.com/photos/Kc-OBw1fMJg?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/home-office?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[810,9],{"slug":6761,"featured":6,"template":680},"what-its-like-to-interview-at-gitlab","content:en-us:blog:what-its-like-to-interview-at-gitlab.yml","What Its Like To Interview At 
Gitlab","en-us/blog/what-its-like-to-interview-at-gitlab.yml","en-us/blog/what-its-like-to-interview-at-gitlab",{"_path":6767,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6768,"content":6774,"config":6780,"_id":6782,"_type":14,"title":6783,"_source":16,"_file":6784,"_stem":6785,"_extension":19},"/en-us/blog/what-we-learned-by-taking-our-bug-bounty-program-public",{"title":6769,"description":6770,"ogTitle":6769,"ogDescription":6770,"noIndex":6,"ogImage":6771,"ogUrl":6772,"ogSiteName":667,"ogType":668,"canonicalUrls":6772,"schema":6773},"What we learned by taking our bug bounty program public","Six months into our public bug bounty program, we're taking stock of what's working and where we can make improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679034/Blog/Hero%20Images/inside-gitLab-public-bug-bounty-program.png","https://about.gitlab.com/blog/what-we-learned-by-taking-our-bug-bounty-program-public","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What we learned by taking our bug bounty program public\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ethan Strike\"}],\n        \"datePublished\": \"2019-07-19\",\n      }",{"title":6769,"description":6770,"authors":6775,"heroImage":6771,"date":6777,"body":6778,"category":720,"tags":6779},[6776],"Ethan Strike","2019-07-19","\nWhen [we opened up our bug bounty program to the public back in December 2018](/blog/gitlab-hackerone-bug-bounty-program-is-public-today/), we weren’t sure WHAT to expect. Certainly we anticipated a flood of new reports which would keep us occupied for quite some time, and the community did not disappoint! While this was true for the first few months, that spike has since evened itself out. 
We did encounter a few surprises, though, around the net number of new, unique reporters and the number of reports from unique reporters.\n\nIn the first seven weeks after making the program public, 42% of all reporters were first-time contributors, and 64% of all the reports received since going public were from first-time reporters to the GitLab program.\n\nSince taking the program public, we roughly doubled the number of valid reports in the program’s history. We have had a paid, private program since 2017, and this program included only the top 1-10% of HackerOne contributors, so opening our program up publicly has not only engaged a broad cross-section of the reporter community, but also made our products and services more secure. We took a closer look at [how we measure success in our public bug bounty program in an earlier blog post](/blog/inside-the-gitlab-public-bug-bounty-program/).\n\n## Triage and response\nResponding to the sheer volume of new reports coming in presents its own set of challenges. So, what does our triage and response process look like?\n\nFor new reports we use an automated bot to provide the initial response to reporters that includes our current triage ETA. This gives an estimation of how long it will take for our team to triage their report. Reports which clearly have a \"critical\" impact will be triaged first. Then, everything else is triaged according to the order submitted. This is important because it helps us to identify duplicate reports and gives fair priority.\n\nFor effective triage, it's paramount for reports to include clear proof of concepts and any other evidence which makes the impact evident to our triage team. Here we classify impact as the amount of affected assets multiplied against their sensitivity levels, according to our data classification policy. 
This and other factors help us to determine the appropriate severity and priority of an issue.\n\nWe also use an internally developed slack command to import triaged reports from HackerOne and into GitLab issues. We define the impacted project and appropriate labels as input, and then the script creates a new confidential issue. The correct team is then assigned, specifically the [product managers](/handbook/product/categories/), where they will take further action to schedule the fix with the engineering teams. Read more on our [issue triage process](/handbook/security/#issue-triage).\n\n![Thank you to our new reporters!](https://about.gitlab.com/images/blogimages/New-reporters-July2019.png){: .shadow.medium.center}\n\nRefining our triage process is just one area where we’ve built improvements based on lessons learned and the evolution of the public program over the last six months. If we look at overall results, we’ve got both positive and not-so-positive results we’re analyzing and improving upon. Our public program has certainly been impactful in the number of vulnerabilities we've identified:\n* From the public program debut through July 3rd, we received 205 valid vulnerabilities. Of that total, 89 vulnerabilities (43%) were from reporters new to the program.\n* In that same period, we received 10 critical-severity vulnerabilities, three of which were from new reporters.\n* And, of the 33 high-severity vulnerabilities reported, 24 (72%) were from new reporters.\n\nOn the flip side, we received an increased number of false positives. Of the 677 reports received through July 3rd, 277 were false positives; 215 (78%) of which were by reporters that started participating after the program went public. Overall though, we consider it a net win, because even these false positives allow us to refine and improve our triage and response processes.\n\n## Timely and accurate communications\nThe one area where we’re most looking to improve upon is communication. 
An effective feedback loop with our HackerOne reporters is vital to continued engagement and effective collaboration. Naturally, with the increased number of reports it’s even more challenging to keep reporters in the loop with timely information. Luckily, this is one area in which automation can help.\n\nPreviously, our security automation team had put together a bot that made first contact when a report was submitted. As the program has matured, our automation team has added the ability to send the reporter the expected date of fix, based on the milestone assigned to the issue; providing further transparency into our triage and response process. Initially, this information was collected by the triaging engineer, but utilizing the GitLab API allows for communication in a more timely manner.\n\nOutside of automation, we’ve implemented a rotation schedule within our team, which assigns a dedicated individual for H1 response and triage each week. This simple system has allowed us to work through our backlog and increase our responsiveness. We’ll continue to explore ways to keep our reporters best informed.\n\nWe’ve also tweaked how fixes are reported and scheduled based on lessons learned from the first few weeks of our public program. Previously, fixes were reported to engineering managers for each team, who fit them into each development cycle as needed. With the increased number of findings, however, we’ve adjusted the process so that the security team now assigns the due date, but the product manager is the single decision-maker for balancing feature and security deliverables. This allows us to better track company response times, and work with development teams to prioritize fixes.\n\n## Transparency and collaboration\n[Transparency](https://handbook.gitlab.com/handbook/values/#transparency) is one of our core values; everything we make at Gitlab is public by default and HackerOne reports are no different. 
We believe that publicly disclosing the reports we receive through our bug bounty program helps reduce the threshold to contribution because it allows researchers to learn and develop on top of other researchers’ findings.\n\nIt’s also noteworthy that the public bug bounty findings help us identify areas to focus on for developer secure coding training. For example, if we see a trend of a certain class of vulnerabilities, we can target education efforts for our developers around the recommended best practices to reduce the number of future reports relating to that class of vulnerabilities.\n\nOur bug bounty program has also delivered data and findings that prompted us to refine and improve how we approach [application security](/topics/devsecops/) at GitLab. Due to the significant volume of authorization issues reported, we realized that ensuring precision and accuracy of our [permissions model](https://docs.gitlab.com/ee/user/permissions.html) across the whole platform is an area that needs improvement. An efficient solution we are investigating is to automate these authorization checks via CI.\n\nAnother key finding this program helped us uncover is that certain classes of vulnerabilities appear repeatedly. Therefore we advocate code reuse through the use of security-focused libraries. 
This consolidates the security controls needed to prevent vulnerability classes such as SSRF from reappearing.\n\nWe’re proud to see the benefits and value being generated by our bug bounty program and specifically our reporter community, spread far beyond GitLab and across the industry.\n\nYou can always see the most up-to-date program stats on our public [HackerOne dashboard](https://hackerone.com/gitlab).\n\nCover image by [markus spiske](https://www.pexels.com/photo/photo-of-green-data-matrix-1089438/) on [Pexels](https://www.pexels.com)\n{: .note}\n",[267,720,9,3832],{"slug":6781,"featured":6,"template":680},"what-we-learned-by-taking-our-bug-bounty-program-public","content:en-us:blog:what-we-learned-by-taking-our-bug-bounty-program-public.yml","What We Learned By Taking Our Bug Bounty Program Public","en-us/blog/what-we-learned-by-taking-our-bug-bounty-program-public.yml","en-us/blog/what-we-learned-by-taking-our-bug-bounty-program-public",{"_path":6787,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6788,"content":6794,"config":6799,"_id":6801,"_type":14,"title":6802,"_source":16,"_file":6803,"_stem":6804,"_extension":19},"/en-us/blog/what-we-re-reading",{"title":6789,"description":6790,"ogTitle":6789,"ogDescription":6790,"noIndex":6,"ogImage":6791,"ogUrl":6792,"ogSiteName":667,"ogType":668,"canonicalUrls":6792,"schema":6793},"What we're reading","GitLab team-members are a passionate group of learners who enjoy reading to strengthen their skills, develop new techniques, and enhance their knowledge.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683225/Blog/Hero%20Images/gitlabreading.jpg","https://about.gitlab.com/blog/what-we-re-reading","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What we're reading\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-08-27\",\n      
}",{"title":6789,"description":6790,"authors":6795,"heroImage":6791,"date":6796,"body":6797,"category":808,"tags":6798},[930],"2018-08-27","\nAt GitLab, we ❤️ reading. Here are a few of our recent pageturners.\n\n### How Rust’s standard library was vulnerable for years and nobody noticed\n[Nick Thomas](/company/team/#nick.thomas), Staff Developer, enjoyed\n[this article](https://medium.com/@shnatsel/how-rusts-standard-library-was-vulnerable-for-years-and-nobody-noticed-aebf0503c3d6)\non Rust, a new systems programming language. Nick commented, \"It was very\ninteresting from a general-security point of view, especially the\n'everything is broken' and 'here is how security advisories actually work' bits.\"\n\n### Designing Delivery: Rethinking IT in the Digital Service Economy\n[Kristie McGoldrick](/company/team/#Krist_McG), Solutions Architect, has been devouring\n[this book](https://www.amazon.com/Designing-Delivery-Rethinking-Digital-Service/dp/1491949880/ref=mt_paperback?_encoding=UTF8&me=&qid=),\nfinding it \"very applicable to GitLab.\"\n\n### It's the Future\n[Lin Jen-Shin](/company/team/#godfat-gitlab), Developer, fondly reflects on\n[this CircleCI blog post](https://circleci.com/blog/its-the-future/), saying\n\"I think I'll remember this post forever. One person is trying to persuade\na teammate, who just wants to get things done right now, to use future technology.\nThe future tech is not mature enough to get things done easily, so it\novercomplicates a lot of things, and the teammate just wants something simple. It's\npretty technical, but it's funny! 
That future tech is getting closer to becoming\nmature now, and GitLab is  trying to use Kubernetes and collaborating with Google\nto make this tech better.\"\n\n### The Phoenix Project: A Novel about IT, DevOps, and Helping Your Business Win\n\n[Rebecca Dodd](/company/team/#rebecca), Content Editor, recently read\n[\"The Phoenix Project\"](https://www.amazon.com/Phoenix-Project-DevOps-Helping-Business/dp/0988262509)\nand confidently declared that, \"I think we’ve\nall read 'The Phoenix Project.'\"\n\n### The DevOps Handbook: How to Create World-Class Agility, Reliability, and Security in Technology Organizations\n\n[Mike Miranda](/company/team/#zmikemiranda), Sales Development Representative, just\npicked up [\"The DevOps Handbook\"](https://www.amazon.com/DevOps-Handbook-World-Class-Reliability-Organizations-ebook/dp/B01M9ASFQ3/)\nto understand the needs of IT leaders.\n\n",[9],{"slug":6800,"featured":6,"template":680},"what-we-re-reading","content:en-us:blog:what-we-re-reading.yml","What We Re Reading","en-us/blog/what-we-re-reading.yml","en-us/blog/what-we-re-reading",{"_path":6806,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6807,"content":6813,"config":6818,"_id":6820,"_type":14,"title":6821,"_source":16,"_file":6822,"_stem":6823,"_extension":19},"/en-us/blog/what-were-reading-in-september",{"title":6808,"description":6809,"ogTitle":6808,"ogDescription":6809,"noIndex":6,"ogImage":6810,"ogUrl":6811,"ogSiteName":667,"ogType":668,"canonicalUrls":6811,"schema":6812},"What we've been reading in September","We've been busting out our bookmarks this month – discover what we've been reading.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678773/Blog/Hero%20Images/septemberreading.jpg","https://about.gitlab.com/blog/what-were-reading-in-september","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What we've been reading in September\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-09-25\",\n      }",{"title":6808,"description":6809,"authors":6814,"heroImage":6810,"date":6815,"body":6816,"category":808,"tags":6817},[930],"2018-09-25","\nTo get back into the swing of things after our [recent summit](/blog/gitlab-summit-cape-town-recap/),\nwe've been reading. Here's what we've been discussing on Slack:\n\n### [I just deployed a serverless app – and I can't code. Here's how I did it](https://medium.freecodecamp.org/i-just-deployed-a-serverless-app-and-i-cant-code-here-s-how-i-did-it-94983d7b43bd).\n\n[Erica Lindberg](/company/team/#EricaLindberg_), Content Marketing Manager, raves about\nthis Medium article,\nsaying \"I loved this article, and this woman might be my new hero.\"\n\n### [High Growth Handbook](https://www.amazon.com/High-Growth-Handbook-Elad-Gil/dp/1732265100)\n\n[Sid Sijbrandij](/company/team/#sytses), CEO, is currently reading and loving\nthis insightful playbook.\n\n### [The Design of Everyday Things](https://www.amazon.com/Design-Everyday-Things-Revised-Expanded/dp/0465050654/ref=sr_1_1?ie=UTF8&qid=1536093671&sr=8-1&keywords=The+Design+of+Everyday+Things%3A+Revised+and+Expanded+Edition)\n[Jeremy Watson](/company/team/#d3arWatson), Product Manager, has been rereading this\npowerful book, saying\n\"It's a constant reminder to practice mindfulness in the things we ship at GitLab, and to make\nsure we're adding features to the product that are consciously designed. The book\nalways serves as a reminder that we're all designers; it's our collective\nresponsibility to ensure that we're intentionally solving the right problems for our users.\"\n\n### Go 2 Draft Designs\n\n[Eric Johnson](/company/team/#edjdev), VP of Engineering, has been reading about the\nbig news: [Generics in Golang 2.0](https://blog.golang.org/go2draft)! 
Woohoo!\n\n### [Storytelling with Data: A Data Visualization Guide for Business Professionals](https://www.amazon.com/Storytelling-Data-Visualization-Business-Professionals/dp/1119002257)\n\n[Emilie Schario](https://gitlab.com/emilie), Data Analyst, has been enjoying\nthis riveting read,\nand says, \"In my role as a data analyst, I spend a lot of time helping different\nteams understand what the data they're collecting or using mean. Clear\ncontext, useful narratives, and consistent visuals help me empower stakeholders\nto make data-driven decisions.\"\n\n[Cover image](https://unsplash.com/photos/XT-o5O458as?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by [Ugur Akdemir](https://unsplash.com/@ugur), licensed\nunder [CC X](https://unsplash.com/license)\n{: .note}\n",[9],{"slug":6819,"featured":6,"template":680},"what-were-reading-in-september","content:en-us:blog:what-were-reading-in-september.yml","What Were Reading In September","en-us/blog/what-were-reading-in-september.yml","en-us/blog/what-were-reading-in-september",{"_path":6825,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6826,"content":6832,"config":6836,"_id":6838,"_type":14,"title":6839,"_source":16,"_file":6840,"_stem":6841,"_extension":19},"/en-us/blog/whats-in-your-backpack",{"title":6827,"description":6828,"ogTitle":6827,"ogDescription":6828,"noIndex":6,"ogImage":6829,"ogUrl":6830,"ogSiteName":667,"ogType":668,"canonicalUrls":6830,"schema":6831},"GitLab's top tools for remote workers","GitLab team members open their backpacks to share their top tools for remote work.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678459/Blog/Hero%20Images/darren_backpack_iceland.jpg","https://about.gitlab.com/blog/whats-in-your-backpack","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's top tools for remote workers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara 
Kassabian\"}],\n        \"datePublished\": \"2019-10-10\",\n      }",{"title":6827,"description":6828,"authors":6833,"heroImage":6829,"date":6469,"body":6834,"category":808,"tags":6835},[672],"\n_At GitLab, our team doesn’t wake up at the same time and commute the same routes to sit in the same office. In fact, some of our team members don’t have an office at all! As a globally distributed company with an all-remote workforce, we have an exceptionally diverse set of team members spread over multiple continents. In other words, we're uniquely positioned to identify the top tools for remote workers. In this series, we explore how GitLab team members use the autonomy our company affords them to create workspaces that suit their lifestyle and cater to their hierarchy of needs, whether that involves creating a cozy home office space or diving into the unknown by working while traveling. See how we make it work by reading [part 1](/blog/not-everyone-has-a-home-office/) and [part 2](/blog/how-to-push-code-from-a-hammock/) of our remote work series._\n\nWhen you’re working far from home sometimes you wind up at a sleek coworking space and other times you land in the – literal – middle of nowhere. GitLab team members that work from the road will tell you that while leaning into adventure is a rush, it’s best to come prepared.\n\n![Middle of Nowhere](https://about.gitlab.com/images/blogimages/backpack/nowhere.jpg){: .shadow.small.center}\nKerri Miller is fond of exploring small quirky towns by motorcycle, but every once in a while she ends up someplace she'd never expected.\n{: .note.text-center}\n\n“I’m always re-evaluating what I bring, and every trip involves experimenting with some new piece of gear or different approach to the routine,\" says [Kerri Miller](/company/team/#kerrizor), Create backend engineer at GitLab. 
Kerri lives in Seattle, Washington but spends almost half the year adventuring across North America on her motorcycle.\n\n“I have a bit more leeway than most travelers, since I’m not limited to just a backpack or a single piece of luggage, but I do have to carry quite a bit of other gear to support the motorcycle – tools, spare tubes for the tires, rain gear, camping gear, etc. – so space and weight are still a premium,\" says Kerri. \"I take a lot of inspiration from the ultralight backpackers and the ‘1 bag’ traveler.\"\n\n## Favorite remote work backpacks\n\nLet’s face it: The backpack or bag itself is critical to the digital nomad experience. The type of bag you require will vary in texture, size, and durability depending upon where and under what conditions you’re traveling, how much you’re packing, and whether you’re prioritizing sturdiness or style – but truly, why compromise on either?\n\nJust like Kerri, professional services engineer [Mike Lindsay](/company/team/#mlindsay) enjoys hitting the open road by motorcycle.\n\n“I road warrior it up to customer engagements probably once a month,\" says Mike. “The bag is a Swiss Army backpack, I love it. It opens up like a clam shell, so you can expose the laptop without actually taking it out. The back **AND** the bottom are padded, so my laptop doesn't take any hard knocks, even when dropping it on the ground. 
The big non-laptop pockets usually get whatever reading material or swag I'm taking with me.\"\n\n[Justin Boyson](/company/team/#jboyson), frontend engineer for Create:Source Code, uses a roll-top waterproof[ Kriega](https://kriega.us/us10) bag, which, incidentally, is a favorite of many motorcyclists: “It's awesome because it looks cool and is completely rainproof,\" Justin says.\n\n[Taylor Medlin](/company/team/#tmedlin), solutions architect, Americas, uses the [Topo Designs Rover Pack](https://topodesigns.com/collections/laptop-bags/products/rover-pack?variant=12789839953973), which is locally crafted in her home state of Colorado and has bright colors for a fun, retro vibe.\n\n[Jackie Gragnola](/company/team/#jgragnola), marketing programs manager at GitLab, is based in San Francisco, California but seems to always be on the move from one city to the next. She can fit most everything she needs inside her go-to purse, which she bought while abroad in Lima, Peru.\n\n![What's in your purse](https://about.gitlab.com/images/blogimages/backpack/whats-in-your-purse.jpg){: .shadow.small.center}\nSometimes you stumble upon the perfect purse at your neighborhood boutique or a big box store. Othertimes, you find it in Peru.\n{: .note.text-center}\n\nIf Jackie needs to bring along more than her usual set-up, she’ll use her backpack of choice: The [Nomatic day backpack](https://www.nomatic.com/products/the-nomatic-backpack).\n\n“It can be locked and attached to a table and is great if working out of a coffee shop,\" Jackie says. \"It has lots of compartments and is perfect for safety and security while traveling.\"\n\n## GitLab: Tools for remote workers unpacked\n\nIn order to effectively work from anywhere, the remote worker really only needs four things: a backpack or bag of sorts, a laptop, WiFi, and power. 
While the rest of the things in your backpack might be non-essentials in terms of work, being uncomfortable or less effective for the sole reason of traveling light is not always the best way to go. GitLab team members unpacked their bags to show us the equipment they use to set up a satellite workspace from just about anywhere.\n\nMike gave us a tour inside his beloved Swiss Army backpack.\n\n![Swiss Army Backpack](https://about.gitlab.com/images/blogimages/backpack/mike.jpg){: .shadow.small.center}\nMike Lindsay's backpack is durable and can withstand the elements on the back of his motorcycle.\n{: .note.text-center}\n\n*   **Top pocket**: Network cable, dual port USB charger with squid cable (in case I make friends!), extra thumb drives, wired earphones (maybe earbuds are dead, or inflight screen can use them).\n*   **Left side pocket**: battery backup, bandaids, glasses cleaner and cloth, toothpaste.\n*   **Right side pocket**: Spare Mac power brick with extension cable adapter.\n*   **Lower middle pocket**: Bag of geek stickers, snap on key ring, pens, Mac USB-C adapter.\n\nThe bright colors of Taylor's Topo Designs backpack are matched by its brightly colored contents.\n\n\"I use the black notebook for GitLab-specific notes and an orange notebook for daily planning,\" she says. 
\"GitLab stickers, peppermint chapstick, lipstick, USB-C adaptor, [Thread wallet](https://www.threadwallets.com/), [Apple Pencil](https://www.apple.com/apple-pencil/), [iPad Pro](https://www.apple.com/ipad-pro/), [Apple Magic trackpad](https://www.apple.com/shop/product/MRMF2LL/A/magic-trackpad-2-space-gray), MacBook Pro, Nalgene bottle.\"\n\n![Topo Designs Backpack](https://about.gitlab.com/images/blogimages/backpack/taylor.jpg){: .shadow.small.center}\nInside Taylor's colorful backpack we find something that isn't mentioned by any other GitLab team members: pen and paper!\n{: .note.text-center}\n\nKerri has a few necessities to make engineering from the road a little less hectic than it might otherwise be with just a laptop and charger.\n\n“I always travel with a small power strip that has 3 AC plugs and 3 USB ports, and a short 8\" cable. This is essential for charging all my devices and accessories without hogging all the plugs!\" says Kerri. She also brings a compact mechanical keyboard. “Most laptop keyboards I find not only fatiguing, but their delicate keys don’t always hold up to the demands of a nomad traveler,\" she explains.\n\n![Kerri and her motorcycle](https://about.gitlab.com/images/blogimages/backpack/kerri.jpg){: .shadow.small.center}\nKerri working on GitLab from the back of her motorcycle.\n{: .note.text-center}\n\n“I get stopped in coffee shops and coworking spaces all the time about my setup,\" says Jackie. 
“It’s not great for productivity, but if I was making a commission from these convos this would be a solid side gig.\"\n\n![Jackie's workplace setup](https://about.gitlab.com/images/blogimages/backpack/dual-screen-setup.jpg){: .shadow.small.center}\nJackie's dual screen setup in Valencia, Spain.\n{: .note.text-center}\n\nTo set-up her typical workplace, Jackie uses:\n\n*   [Roost stand](https://www.therooststand.com/)\n*   [Anker bluetooth keyboard](https://www.anker.com/products/variant/anker-bluetooth-ultraslim-keyboard/A7726111)\n*   [Apple magic mouse](https://www.apple.com/shop/product/MRME2LL/A/magic-mouse-2-space-gray)\n*   [Asus external monitor 169B+](https://www.asus.com/us/Monitors/MB169BPlus/HelpDesk_Download/)\n*   [Apple airpods](https://www.apple.com/airpods/)\n*   Backup wired earpods if needed\n\n[Erich Wegscheider](/company/team/#ewegscheider), talent operations specialist at GitLab, is [currently in Bali on a coworking adventure with WiFi Tribe](/blog/not-all-remote-is-created-equal/). Like Jackie, Erich uses the Apple magic mouse 2, and also the [Apple magic keyboard](https://www.apple.com/shop/product/MLA22LL/A/magic-keyboard-us-english), along with universal power adapters and a power bank in case the power goes out.\n\nErich also brought a laptop stand with him on his journey. 
He says the [Tiny Tower Laptop Stand](https://tinytowerstand.com) is “key to helping maintain a healthy posture while working _without_ a proper monitor.\" Sadly, not everything fits comfortably in a backpack.\n\n![Bali workspace](https://about.gitlab.com/images/blogimages/baliworkspace.png){: .shadow.small.center}\nErich managed to configure an ergonomic workspace in Bali.\n{: .note.text-center}\n\nPeople experience associate at GitLab, Caroline, is working as she explores Europe, and if there is one thing that she always, without fail, has in her backpack, it’s power adapters.\n\n\"Call it paranoia but I always pack US, UK, and EU extra adapters/converters,\" says Caroline. There is a background story here. Caroline, who lives in Kenya, traveled to South Africa for the first time last year to meet up with some GitLab colleagues.\n\n\"I got to my room in South Africa five minutes before a meeting only for the outlets to look totally alien to me,\" she says. \"I didn't know they were the **only country in the world** that used such plugs and needless to say, I missed the meeting.\"\n\nShe also has an extra phone with her so she can easily create a WiFi hotspot.\n\n## GitLab’s roadtrip essentials\n\nIt’s not _quite_ the same as working out of a backpack, but GitLab product manager Nicole Schwartz has been on a months-long roadtrip across the United States, living out of a suitcase and the trunk of her car as she visits friends, GitLab team members, and speaks at conferences along the way.\n\nLike Caroline, Nicole also has an extra phone for WiFi tethering and also recommends a nice set of noise-cancelling headphones (a must-have even if you’re working at home!), a portable mouse and mousepad, extension cord and powerstrip, and, if you have room, a USB monitor which is great for when you can work from a hotel room.\n\n“Download podcasts and YouTube videos to listen to on the drive since the radio will cut in and out and you might as well be productive,\" Nicole 
says.\n\nSince GitLab is a global, asynchronous team, most team meetings are recorded and uploaded to our [GitLab Unfiltered channel on YouTube](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A), and a few teams even use the [audio from meetings to create podcasts](/blog/how-we-turned-40-person-meeting-into-a-podcast/) to make it easy to stay up-to-date on what’s happening.\n\nNicole has a few recommendations for anyone considering working and traveling for an extended period of time, including packing laundry detergent (she uses TidePods) and having dollars and quarters on-hand to pay bridge and road tolls (and also feed washers and dryers).\n\n## Outreach, from your backpack\n\nThere are so many perks that come with working at GitLab: The fact that we are family-first not just in principle, but in practice; the personal and professional autonomy our company affords us; unlimited PTO and being encouraged to _actually use it_; and of course, the fact that we are all remote. But at the end of the day, the best brand ambassadors are all of us.\n\nInside his rolltop, rain-proof Kriega backpack, Justin brings a laptop and charger, as well as “backup wired earbuds, because airpods don't last forever. Oh, and a bag of GitLab stickers!\"\n\n![Stickers](https://about.gitlab.com/images/blogimages/backpack/justin.jpg){: .shadow.small.center}\nNo backpack is fully packed without GitLab swag.\n{: .note.text-center}\n\nJustin is certainly not the only GitLab team member who carries stickers in his backpack. 
You may have noticed in our photos that, like laptop stands and bluetooth headphones, GitLab stickers and other treats often come along with our team members, whether they're just stopping in their neighborhood coffee shop or traveling thousands of miles from home.\n\n“It isn’t a work essential per se, but I also try to have a stash of stickers, and some kind of snack treats from Seattle – small packs of salmon, bonbons from a local manufacturer, or small sample packs of coffee from a local roaster,\" says Kerri. “I’ll try to gift these to the folks who help me out on the road, who give me directions, provide a place to stay, or to cafe managers who turn a blind eye to me staying in one place for several hours.\"\n\n**More tips for productive remote working:**\n\n[5 remote work best practices](/blog/mastering-the-all-remote-environment/)\n[Tried and true remote work productivity hacks](/blog/how-to-build-a-more-productive-remote-team/)\n[Be the boss of your video call](/blog/tips-for-mastering-video-calls/)\n\nCover image by Darren Murph\n{: .note}\n",[832,9],{"slug":6837,"featured":6,"template":680},"whats-in-your-backpack","content:en-us:blog:whats-in-your-backpack.yml","Whats In Your Backpack","en-us/blog/whats-in-your-backpack.yml","en-us/blog/whats-in-your-backpack",{"_path":6843,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6844,"content":6850,"config":6854,"_id":6856,"_type":14,"title":6857,"_source":16,"_file":6858,"_stem":6859,"_extension":19},"/en-us/blog/whats-it-like-to-work-security-at-gitlab",{"title":6845,"description":6846,"ogTitle":6845,"ogDescription":6846,"noIndex":6,"ogImage":6847,"ogUrl":6848,"ogSiteName":667,"ogType":668,"canonicalUrls":6848,"schema":6849},"What’s it like to work in security at GitLab?","Job descriptions and the job they represent don't always line up.  
What does someone working in our Security department actually do?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671032/Blog/Hero%20Images/wocintechchat_blog2.jpg","https://about.gitlab.com/blog/whats-it-like-to-work-security-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What’s it like to work in security at GitLab?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2021-01-07\",\n      }",{"title":6845,"description":6846,"authors":6851,"heroImage":6847,"date":6394,"body":6852,"category":698,"tags":6853},[1010],"\n\n{::options parse_block_html=\"true\" /}\n\n\n\nThis is post 2 of a 3 part series profiling several women in GitLab’s security organization.  See part one, [\"How to break into security\"](/blog/breaking-into-security/)and three, [\"Considering a career in security? Here’s some advice.\"](/blog/considering-a-career-in-security/).\n{: .note}\n\n_Breaking into technology, and security, can be difficult for anyone. At GitLab [31% of our workforce identifies as women](/handbook/people-group/people-success-performance-indicators/#diversity---women-at-gitlab). In our security department we have ten team members who are women out of a total of 48 team members; that’s 21%.  Global women in tech numbers are around 21.4% according to [CNET](https://www.cnet.com/news/microsofts-first-in-depth-diversity-report-shows-progress-remains-slow/) and this recent study, [“Resetting Tech Culture”](https://www.accenture.com/us-en/blogs/accenture-research/why-tech-is-losing-women-just-when-we-need-them-the-most) indicates that young women who go into tech drop out by the age of 35.  How do we change this?  
GitLab is looking to help there through our [outbound hiring model](/handbook/hiring/candidate/faq/), [tracking and working toward key metrics](/handbook/people-group/people-success-performance-indicators/#diversity---women-in-management), [inclusion training](/company/culture/inclusion/#diversity-inclusion--belonging-training-and-learning-opportunities), [team member resource groups](/company/culture/inclusion/erg-guide/#how-to-join-current-tmrgs-and-their-slack-channels), Engineering department-based developmental and networking groups (like our [Women in Security group](/handbook/security/women-in-security.html)), building and fostering an [inclusive remote culture](/company/culture/inclusion/building-diversity-and-inclusion/) and [mentorship programs](/company/culture/inclusion/erg-minorities-in-tech/mentoring/)._\n\nReading a job description can only shed so much light on a role.  When considering a company or career path, it helps to understand what the organization, the roles and the responsibilities look like, from the inside.  
This is part 2 of a 3 part series where 8 women in our Security department share details about their roles and the actual projects they are working on.\n\n#### We asked:\n* What do you do and what are some recent projects you’re working on?\n* What’s something new and/or exciting that you’d like to learn or be involved in?\n* If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?\n\n---\n\n### [Julia Lake](/company/team/#julia.lake) - Director, [Security Risk and Compliance](/handbook/security/#assure-the-customer---the-security-assurance-sub-department)\nJoined GitLab April 2020 / Connect with Julia on [LinkedIn](https://www.linkedin.com/in/julia-lake-16843740/)\n\n![Julia Lake](https://about.gitlab.com/images/blogimages/working-in-security/jlake_blog2.png){: .shadow.small.left.wrap-text}\n\n**What do you do and who do you collaborate with in your role?**\nI am responsible for the Security Assurance sub-department, which includes the security compliance, security operational risk and field security functions. Security Assurance is part of the [Security department](/handbook/security/#security-department), which is part of the broader [Engineering organization](/handbook/engineering/) at GitLab, and we work cross-functionally across the entire organization. We are extremely focused on information security and partner with system and process owners in order to ensure security controls and best practices are embedded throughout our environment. We also support our customers in their assessment of GitLab’s security practices and provide feedback from the field to drive internal security strategy.\n\n**What are some projects you’re working on?**\nAs an organization, some recent projects we’ve embarked on include: SOC 2 Type 2 and SOC 3 audit and report reviews, third party GRC application deployment, customer and sales enablement program development, and deployment of an operational risk management function.  
Personally, I’ve been focused on organizational strategy and roadmapping, policy definition and metric redesign.\n\n**What’s something new and/or exciting that you’d like to learn about or be involved in?**\nI’m always interested in learning more about the different functions of security. Lately I’ve been particularly fascinated in learning more around Zero Trust architecture and best practices and am slowly making my way through [NIST 800-207](https://csrc.nist.gov/publications/detail/sp/800-207/final).\n\n**If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?**\nGo for it! Security is so incredibly dynamic and you can choose a career path that aligns with your specific interests. Security Assurance is especially interesting to me because we are truly leading the charge on helping organizations grow and mature their security posture, and we have the opportunity to partner with our wonderful customers along the way. My biggest piece of advice for Security Assurance professionals is to challenge yourself against complacency, be adaptive to change and think critically about how new requirements can be applied to meet intent without hindering the business. Also, good documentation is a shield.\n\n---\n\n### [Jennifer Blanco](/company/team/#jblanco2) - Sr. 
[Risk and Field Security](/handbook/security/security-assurance/field-security/) Engineer\nJoined GitLab June 2019 / Connect with Jennifer on [LinkedIn](https://www.linkedin.com/in/jenniferblanco1/)\n\n![Jennifer Blanco](https://about.gitlab.com/images/blogimages/working-in-security/jblanco_blog2.png){: .shadow.small.right.wrap-text}\n\n**What do you do and who do you collaborate with in your role?**\nMy focus is on [Third Party Risk Management](/handbook/security/security-assurance/security-risk/third-party-risk-management.html), specifically creating processes to evaluate the security maturity of organizations to ensure they can meet or exceed GitLab’s own standards. This includes traditionally-procured vendors and other third parties that could impact GitLab through activities such as handling our sensitive data or providing a service that is a dependency to our business operations and product offerings. I’ve been iterating on the program to methodically focus on third parties most critical to GitLab while building out the security aspects assessed to identify the risk level to GitLab. Such considerations include: data protections the third party has in place, their organizational security management practices, the technical posture of products, and the ability to support our compliance to customer, industry and regulatory requirements. I partner with teams including Security Compliance, [Application Security](/topics/devsecops/), Legal, Procurement and IT to gather salient inputs that feed into the program’s evolution.\n\n**What are some projects you’re working on?**\nI partnered with my team members working on Security Operational Risk Management (StORM) to create the inherent risk rating scoring for third-party security reviews which effectively narrows the scope for our reviews to the most adverse impact on GitLab. 
I created a supplemental third-party hardening guide meant to be consumed by business owners and third parties directly, and I’m working on an internal guide on how to share GitLab data externally. I’ll be focusing on expanding third-party reviews to product assessment with the Application Security team and automating these in a more technical fashion. Other contributions I’ve made are identifying contractor requirements for elevated access and reviews for free apps which focus heavily around Terms of Service and Privacy Policy; since nothing is ever truly free.\n\n**What’s something new and/or exciting that you’d like to learn or be involved in?**\nMy goal is to become a Data Privacy expert to intersect my interests in systems security, regulatory compliance and ultimately contribute to industry and public policy around big data. Having worked on contracts for both the customer and vendor side, I know the importance of understanding the inner workings of generating and processing data to uncover all the critical paths to assess the adequacy of safeguards. But in addition to being a Security professional, I’m a consumer who wishes to protect my information by raising the bar in the industry and creating mechanisms to keep companies accountable. This is important work because industries can’t evolve along with the ingenious new threats without practitioners who really “get it”, from both a technical and risk perspective.\n\n**If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?**\nThird party management differs by industry but one thing is constant: risk management. I recommend learning how to think about risk so that you can sniff it out and create relevant treatment plans. If specifically interested in the technology space, I would start by reviewing top companies’ security statements to understand how the leaders in the industry are protecting their customer assets. 
I’ve seen a lot of companies phase from keeping information tightly restricted to becoming more transparent so you can learn a lot about an operation from their public-facing materials. Remember to “follow the data” as a detective would follow the money. Data is big business nowadays and it’s just the beginning so learning how to sleuth out data, typically one of the most important assets for companies, will help in guiding your security reviews. On a final note, don’t be discouraged if you didn’t follow an Information or Computer Science track in your academic career. In this information age, there’s no shortage of resources as long as you have the drive to take advantage of it. Be cognizant of how you want to shape your career and take even the tiniest steps towards it; it adds up over time.\n\n---\n\n### [Juliet Wanjohi](/company/team/#jwanjohi) - Security Engineer, [Security Automation](/handbook/security/security-engineering/automation/)\nJoined GitLab May 2020 / Connect with Juliet on [LinkedIn](https://www.linkedin.com/in/juliet-wanjohi/) and [Twitter](https://twitter.com/jay_wanjohi)\n\n![Juliet Wanjohi](https://about.gitlab.com/images/blogimages/working-in-security/jwanjohl_blog2.png){: .shadow.small.left.wrap-text}\n\n**What do you do and who do you collaborate with in your role?**\nI recently joined the Security Automation team as a Security Engineer after an exciting [summer internship in GitLab’s Security department](/blog/what-its-like-to-intern-in-gitlab-security/). My main responsibilities include the design, build and deployment of security tooling and automation in order to help speed up security-specific efforts. This involves working with my fellow team members as well as various GitLab users and customers. 
At the moment, I am ramping up my skills and knowledge in languages, tools and technologies that our team uses in their automation efforts.\n\n**What are some projects you’re working on?**\nCurrently as a team effort, we’re building an anti-spam service that will aid in the identification and prevention of spam-related content across GitLab the product. Through this project, I am getting the chance to take part in technology research and architectural conversations related to building the product and how it will ultimately be consumed by users. Previously, during my internship, I was also able to work on a variety of projects ranging from improving path traversal checks on file names and file paths for GitLab the product to using machine learning techniques for security detection use-cases.\n\n**What’s something new and/or exciting that you’d like to learn or be involved in?**\nI am interested in learning more about securing cloud infrastructure and cloud native applications. Considering a lot of applications are moving to the cloud, I feel that this would be a very strong skill set to have moving into the future. An interesting avenue that I would like to pursue further is focusing on protecting [Machine Learning as a Service](https://www.frontiersin.org/articles/10.3389/fdata.2020.587139/full) cloud platforms.\n\n**If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?**\nBuilding yourself a support network of friends, mentors and peers can go a long way in helping you shape your security career. This can be in the form of seeking advice on career goals and/or guidance on resources that can help you grow your knowledge and skill set. Taking each day as an opportunity to learn something new is also super important as one needs to keep up with changing technological trends in security.\n\n---\n\n### [Liz Coleman](/company/team/#lcoleman) - Sr. 
Security Assurance Engineer, [Compliance](/handbook/security/security-assurance/security-compliance/ )\nJoined GitLab January 2020 / Connect with Liz on [LinkedIn](https://www.linkedin.com/in/elizabeth-coleman-5779418b/)\n\n![Liz Coleman](https://about.gitlab.com/images/blogimages/working-in-security/lcoleman_blog2.png){: .shadow.small.right.wrap-text}\n\n**What do you do and who do you collaborate with in your role?**\nI am currently part of the Security Compliance team and my main responsibilities include managing the SOC 2 program, user access reviews, control testing and any other ad hoc security compliance related activities that come my way. As compliance initiatives span the entire organization, I work with a variety of other teams in order to get my job done.\n\n**What are some projects you’re working on?**\nRight now we are in the process of obtaining our SOC 2 Type 2 certification. This has required a continuous effort in order to get our GitLab Control Framework (GCF) control set up and running, tested, and into a state of continuous control monitoring. As the [directly responsible individual](/handbook/people-group/directly-responsible-individuals/#what-is-a-directly-responsible-individual) for the SOC 2 program, I have been living and breathing SOC-related control testing, project management and external audit preparation for the last few months now. It’s quite a bit of work but I know it will be well worth it once GitLab obtains their certification.\n\n**What’s something new and/or exciting that you’d like to learn or be involved in?**\nI’ve always been interested in learning more about the growth of cloud native computing and how organizations have had to adapt and change processes or procedures in order to best manage workflows. 
Right now, I’m currently working on expanding my ISO27001 knowledge as that is next on the horizon for possible GitLab certifications.\n\n**If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?**\nOpen your mind and put yourself in a mental space of learning and growing from everyone around you. Working in security compliance requires knowledge and awareness about all aspects of an organization. Having that general understanding of which teams do what and why will help develop your comprehension of compliance requirements by function, team, and holistically for your organization.\n\n---\n\n### [Meghan Maneval](/company/team/#mmaneval20) - Manager, [Risk and Field Security](/handbook/security/security-assurance/field-security/)\nJoined GitLab July 2020 / Connect with Meghan on [LinkedIn](https://www.linkedin.com/in/meghanmaneval/)\n\n![Meghan Maneval](https://about.gitlab.com/images/blogimages/working-in-security/mmaneval_blog2.png){: .shadow.small.left.wrap-text}\n\n**What do you do and who do you collaborate with in your role?**\nI am the Manager of Risk and Field Security and work with an amazing team of Risk and Field Security Assurance Engineers here at GitLab. With my position and responsibilities I also work very closely with my fellow Security Managers, members of Sales and [Customer Success](/handbook/customer-success/), and GitLab team members across the organization. My team’s goal is to identify risks that could negatively impact GitLab and our ability to meet our goals.\n\nIf you think of your car, we are your safety features and focus on three main areas of security:\n* [Field Security](/handbook/security/security-assurance/field-security/customer-security-assessment-process.html) is like your car insurance. 
We assure our customers that we can meet their security needs and thus protect our revenue stream.\n* [Third Party Risk](/handbook/security/security-assurance/security-risk/third-party-risk-management.html) is like your lane assistance. We identify risks from third parties and direct the organization away from danger.\n* [Security Operational Risk](/handbook/security/security-assurance/security-risk/storm-program/index.html) is like your check engine light. We identify risks from within the company and assist in remediating them.\n\nIf you’re interested in learning more you can check out this [video on how the Risk and Field Security team adds value to GitLab]( https://www.youtube.com/watch?v=h95ddzEsTog).\n\n**What are some projects you’re working on?**\nMy team and I recently implemented a SaaS governance, risk, and compliance (GRC) tool to manage our security assurance activities. We are still in the process of fully implementing it, but we have made a lot of progress so far. Within this project we got the opportunity to review all of our processes and really uplevel the maturity of our programs. I recently presented at a user group and discussed the implementation and how [GitLab utilizes the tool for Risk Management activities](https://www.youtube.com/watch?v=ZOiHT-N1tLY).\n\n**What’s something new and/or exciting that you’d like to learn or be involved in?**\nI’m actually really excited about a new program we are building: the Customer Success Partnership Program. This is a multi-functional partnership where each of us will learn from each other about the various ways we can help support our customers. I’m really looking forward to learning more about the sales and support processes in place at GitLab and help iterate on them.\n\n**If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?**\nAlign yourself with a strong mentor who understands how the organization works. 
Most security principles are applicable across most industries and organizations. Encryption is encryption, right? But it is critical that you understand how security fits into the organization, how management views security, and how you can integrate security into other processes. Making strong connections throughout the organization is critical to success in risk management. It makes delivering “bad news” easier and allows you to make more educated recommendations to remediate them.\n\n---\n\n### [Mitra Jozenazemian](/company/team/#mjozenazemian) - Senior Security Engineer, [Security Incident Response Team](https://handbook.gitlab.com/job-families/security/security-engineer/#sirt---security-incident-response-team)\nJoined GitLab July 2020 / Connect with Mitra on [LinkedIn](https://www.linkedin.com/in/mitra-jozenazemian-0a05233b)\n\n![Mitra Jozenazemian](https://about.gitlab.com/images/blogimages/working-in-security/mjozenazemian_blog2.png){: .shadow.small.right.wrap-text}\n\n**What do you do and who do you collaborate with in your role?**\nI work on the GitLab [Security Incident and Response (SIRT) team](/handbook/security/#sirt---security-incident-response-team-former-security-operations). For any security incident or event that would happen here at Gitlab, we act like firefighters-- researching and responding to incidents, while working with other teams to mitigate the incident ASAP. The rest of the time, we are implementing and improving tools that can help us to detect and respond to the incidents faster and more effectively.\n\n**What are some projects you’re working on?**\nRecently, we implemented a new [security information and event management (SIEM) solution](/blog/how-we-made-gitlab-more-secure-in-twenty-twenty/) to further improve visibility and detection and response capabilities. This allows my team to send logs from different applications to the new SIEM and then we work to define different scenarios of suspicious activities. 
From these potential scenarios, we create alerts for detecting them and runbooks to help us respond to those alerts.\n\n**What’s something new and/or exciting that you’d like to learn or be involved in?**\nI would like to be more involved in the red team activities. I’d like to wear their red hat and try to see the organization from an attacker’s eyes and find the gaps and vulnerabilities that might be hidden.\n\n**If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?**\nTechnology, and therefore security, is a constantly changing area. So, if someone were interested in being a part of SIRT, they’d need to be familiar with several different types of technologies, frameworks and programming languages. They should remain up-to-date and informed on news and research about recent technologies, and new cyber security attacks and vulnerabilities. Being able to develop the ability to think like both an attacker and defender to improve detections and post-incident recovery process is also a very helpful skill in this area.\n\n---\n\n### [Rupal Shah](/company/team/#rcshah) - [Security Compliance Engineer](/handbook/security/#security-compliance)\nJoined GitLab October 2020 / Connect with Rupal on [LinkedIn](https://www.linkedin.com/in/rupal-shah-57a384/)\n\n![Rupal Shah](https://about.gitlab.com/images/blogimages/working-in-security/rshah_blog2.png){: .shadow.small.left.wrap-text}\n\n**What do you do and who do you collaborate with in your role?**\nI’m still pretty new to GitLab, but once I am fully up to speed, I will be the Governance, Risk and Compliance Administrator managing the GRC application, creating training, updating policy documents, evaluating frameworks and assisting with user access reviews, audits, control testing and other ad hoc security compliance related projects that are defined.  
I will be working with a variety of teams throughout GitLab as Compliance affects everyone.\n\n**What are some projects you’re working on?**\nWe are onboarding our new GRC tool (ZenGRC) and I am defining a change management runbook for significant/high risks changes.  We are bringing our security training in house, so I am creating a new general security awareness training for new hires and annual review by team members. I am also focusing my time on formalizing our information security policy and standards.\n\n**What’s something new and/or exciting that you’d like to learn or be involved in?**\nI have always wanted to be involved and learn more about [FedRamp](https://www.gsa.gov/technology/government-it-initiatives/fedramp) and the entire process to get certified.  As GitLab is currently in the analysis stages, it is nice to be a part of the process and get a better understanding of the requirements necessary if we decide to get certified.\n\n**If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?**\nDon’t be scared and don’t feel overwhelmed.  Take a deep breath and dive in!  I come from a non-security/compliance background and all it takes is passion and a good mentor.  Ask lots of questions and don’t be afraid to ask any question you have!  
The more you ask, the more you learn!\n\n---\n\n### [Heather Simpson](/company/team/#heather) - Senior External Communications Analyst, [Security Engineering ](/handbook/security/security-engineering/)\nJoined GitLab February 2019 / Connect with Heather on [LinkedIn](https://www.linkedin.com/in/heathersimpson700/) and [Twitter](https://twitter.com/heatherswall)\n\n![Heather Simpson](https://about.gitlab.com/images/blogimages/working-in-security/hsimpson_blog2.png){: .shadow.small.right.wrap-text}\n\n**What do you do and who do you collaborate with in your role?**\nI’ve got a unique job within our security department in that I work in a marketing communications capacity, something I referenced in the [first blog post in this series](/blog/breaking-into-security/).  I focus on increasing awareness and strengthening community engagement and industry recognition of GitLab Security initiatives, programs and team members’ expertise through campaigns and initiatives that include blogs, contributed articles, social media, online events and more. To do this, I collaborate heavily with our security teams and partner with our content, corporate and social marketing teams. I sit within our Security and Engineering Research team and so a large focus area for me is increasing awareness and engagement in our [bug bounty program](https://hackerone.com/gitlab). Part of this includes working with the hackers that contribute to our program and partnering with the HackerOne communications team to recognize the amazing contributions and talents these security researchers bring to making GitLab more secure.\n\n**What are some projects you’re working on?**\nDecember was a busy month, where most of my time went to writing and editing blogs. [“2020 through a bug bounty lens”](/blog/twenty-twenty-through-a-bug-bounty-lens/) takes a look back at the past year in terms of bug bounty metrics (reports received, hackers contributing, etc) and bounties paid out 💰. 
It also celebrates five winners of a contest we held in the fall, where the prize was a custom GitLab mechanical keyboard 🎉-- organizing [this contest](/blog/top-tips-for-better-bug-bounty-reports-and-a-hacker-contest/#celebrating-great-reports-and-great-reporters) and that piece of custom swag are all projects I lead.  Other new series I’ve developed and am working on are our [“Ask a Hacker” blog series that profiles some of the top hackers contributing to our bug bounty program](/blog/rpadovani-ask-a-hacker/) and our live GitLab Security Ask Me Anything (AMA) series which kicked off with an [AMA with hacker Riccardo Padovani](https://youtu.be/SK_vuZCafZ4) and will follow soon with an [AMA with GitLab’s own Red Team on Jan 26, 2020](https://docs.google.com/forms/d/e/1FAIpQLSekc1LYWYbhORNzZvLza8Btn9V0wY7K9SGVZed5RpJbczqdfw/viewform?usp=sf_link).  You can always see what I’m working on through [my GitLab profile](https://gitlab.com/heather) and also by checking out our [Security blogs](/blog/tags.html#security). I started our Security blogging program when I joined GitLab in February 2019 and, together with my security team mates, we’ve published 52 blogs to date with more great content in the works!  Speaking of, if there’s something you’d like to read about, whether it’s: what makes our approach to red teaming unique or how do our security researchers decide what, exactly, they are going to research? Message me, I’d love to hear your ideas!\n\n**What’s something new and/or exciting that you’d like to learn or be involved in?**  I think I’d like to more deeply develop my skills in the areas of search engine optimization and marketing data and analytics; this would strengthen efforts in my current role and flesh out my existing digital marketing experience and expertise.\n\n**If someone was interested in a role like yours, what’s the most helpful piece of advice you could offer?**  Be comfortable with being uncomfortable.  
Many women in tech are used to being one of few women “in the room”. However, as someone working in a marketing capacity, sitting inside an engineering department, I find I’m usually (also) the only non-engineer on most calls and teams. And that’s just fine! But I’ve had to learn to be comfortable with owning and asserting my area of expertise, with asking questions for clarification when I don't understand something and with throwing first iteration content out there acknowledging that I need an SME’s help to ensure accuracy. And you know what? I’ve learned two things: I understand way more about technical concepts than I give myself credit for most times 💪 and, my asking questions and seeking clarification helps to create better and more readily consumable content for our audiences -- a win for everyone! 🙌\n\n---\n\n## Sound interesting? We're hiring!\n\nCheck out the [career opportunities page](/jobs/). Don't meet 100% of the qualifications for one of these roles? Still share your information with us! We're hiring within our Security department (and beyond) and looking for unique backgrounds and expertise. 
You can also learn more about GitLab’s [culture](/company/culture/) and [values](https://handbook.gitlab.com/handbook/values/) in order to get an understanding of what it might be like to work here!\n\nCover image by [#WOCinTech Chat](https://www.wocintechchat.com/).\n{: .note}\n\n",[720,9],{"slug":6855,"featured":6,"template":680},"whats-it-like-to-work-security-at-gitlab","content:en-us:blog:whats-it-like-to-work-security-at-gitlab.yml","Whats It Like To Work Security At Gitlab","en-us/blog/whats-it-like-to-work-security-at-gitlab.yml","en-us/blog/whats-it-like-to-work-security-at-gitlab",{"_path":6861,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6862,"content":6868,"config":6873,"_id":6875,"_type":14,"title":6876,"_source":16,"_file":6877,"_stem":6878,"_extension":19},"/en-us/blog/why-all-organizations-need-prometheus",{"title":6863,"description":6864,"ogTitle":6863,"ogDescription":6864,"noIndex":6,"ogImage":6865,"ogUrl":6866,"ogSiteName":667,"ogType":668,"canonicalUrls":6866,"schema":6867},"Why Prometheus is for everyone","You think you don't need Prometheus – I'm here to tell you why you're wrong. Learn why GitLab uses Prometheus, and why your organization should be using it too!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678778/Blog/Hero%20Images/monitoring-cover.png","https://about.gitlab.com/blog/why-all-organizations-need-prometheus","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why Prometheus is for everyone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Matos\"}],\n        \"datePublished\": \"2018-09-27\",\n      }",{"title":6863,"description":6864,"authors":6869,"heroImage":6865,"date":6870,"body":6871,"category":743,"tags":6872},[4683],"2018-09-27","\nIt's no secret that here at GitLab, we hitched our wagon to [Prometheus](https://docs.gitlab.com/ee/administration/monitoring/prometheus/index.html#doc-nav) long ago. 
We've been\n[shipping it with GitLab since 8.16](/releases/2017/01/22/gitlab-8-16-released/). Having said that,\neven within GitLab we weren't all using Prometheus. The Support Engineering team was\nvery much in the camp of \"We don't need this to troubleshoot customer problems.\" We were wrong;\nwe needed Prometheus all along, and here's why your organization should be using it too.\n\n## What is Prometheus?\n\nFor a short answer, Prometheus is software that stores event data in real-time. But more specifically…\n\nPrometheus is a powerful and free open-source software monitoring service that records real-time metrics and provides real-time alerts. It’s built with an HTTP pull model. Prometheus collects data performance metrics which you can view through an external dashboard tool (such as Grafana) or by directly connecting to Prometheus. \n\nSoundCloud was the original developer of Prometheus, but nowadays it is continuously maintained by the Cloud Native Computing Foundation (CNCF). The cloud-native architecture of Prometheus has made it extremely popular as part of a modern technology stack. \n\n## Prometheus is great, so why isn't everyone using it already?\n\nI think GitLab customers fall into a few categories: You have the customer who wants to use GitLab\nbut can't or doesn't want to manage servers. They'll use [GitLab.com](/pricing/)! By making that choice they can\nleverage the hard work of our Production team and reap the benefits of what Prometheus has to offer.\n\nThen you have the customer who is [running their own simple GitLab deployment](/pricing/#self-managed), but they may\nnot know or appreciate the value of Prometheus metrics. The Support Engineering team was\nlike this too! We thought, \"We can use traditional tools. Just knowing about where logging is,\nknowing about the system, is enough to actually solve the problems that we see. 
Just having\nexperience is enough.\" Not so.\n\nThen you have large, enterprise customers who are deploying GitLab clusters with multiple dozens of\nservers and a lot of moving parts. For them, Prometheus really shines because the complexity\nballoons, and once you move GitLab from a single server to three, or four, or 20, being able\nto see all of the metrics in one view makes a huge difference in time to resolving critical infrastructure issues.\n\n## How we saw the light about Prometheus\n\nA large GitLab customer was experiencing a really strange, catastrophic failure scenario, and\nthe problem was proving evasive to the support team. Even after days of troubleshooting we couldn't\nfind what we were looking for, so we called in [Jacob](/company/team/#jacobvosmaer) from our\n[Gitaly](/blog/the-road-to-gitaly-1-0/) team because it looked like Gitaly was at the\ncore of the problem. We had been using Gitaly on GitLab.com for about six months at that point\nand he had never seen it behave this way before. It looked like Gitaly was accessing Git data,\nbut just _extremely slow_, and it would spread across the cluster one server at a time. Jacob\nand I speculated and made some Gitaly dashboards, and while that was a good moment of cross-team\ncollaboration, he was stumped.\n\nMost of the time when we're debugging GitLab, it's clear to pinpoint the root of the problem.\nBut in this case, it was a catastrophic failure across the entire cluster that was a ticking timebomb.\nWhen we'd see the indicators we'd effectively have 15-35 minutes before the entire fleet was down.\nThis customer actually had Prometheus on their roadmap but hadn't deployed it yet, so when\nthe failure happened it was top of our list of things to deploy:\n\n**Support**: We should focus on trying to understand why this host is affected.\n\n**Production**: If we get better observability with Prometheus we'll move faster.\n\n**Support**: I'm worried this is a distraction! 
We don't have much time.\n\n**Production**: Watch and learn. Watch and learn.\n\n_(Cue dramatic montage of hackers with GitLab stickers on their laptops feverishly typing under duress)_\n\nOnce Prometheus was in place, we called in the Production team. They run one of the largest\nGitLab instances: GitLab.com. We exported their dashboard and gave it to the customer, so\nwithin minutes they had a GitLab production-scale dashboard that was all of the things that\nour production engineers use. Now, we could leverage the wealth of knowledge of our Prometheus\nexperts, as it's a familiar interface and they know exactly what they're looking at.\n\nWith that as a starting point we started querying and slicing data, and dashboards, which let\nus build a couple of different facets that let us view the data and come to some conclusions.\n\"Okay, it looks like once a host becomes 'tainted,' all Git-level operations spike and _HALT_.\nNow we can finally ask the question, why?\" And then, when we asked that, we saw that it was\na problem with Amazon's EFS file system. We had hit some upper boundary of EFS access and,\nhaving identified it, we were able to fix it by moving those specific files out of EFS. After we\nmade that change it was easy to use Prometheus and Grafana to verify that the state was sound\nand everything was working as expected afterwards, without even lifting a finger. We just looked\nat the dashboard in place. So while the customer had intended to deploy Prometheus later this\nyear, now, in this emergency situation, Prometheus definitely saved the day and is a huge part\nof keeping their GitLab infrastructure healthy. Without it we wouldn't be nearly as confident\nor comfortable in our solution.\n\n## Prometheus has opened up a whole world of possibilities.\n\nWe have another large client that's on an older version of GitLab without Prometheus. We're\nworking to debug things there and while we're able to do it, it's slower going. 
It requires a lot\nmore manual effort to coalesce the data and get it in a form we can use. It often takes about\n35-40 minutes to get the data, slicing with grep, AWK, and friends and at least one man page\nto look up syntax. Whereas, with Prometheus and Grafana, we'd be able to just access and view\nthe data, query it, and affect it within minutes. We already have a lot of [built-in monitoring capabilities](https://docs.gitlab.com/ee/administration/monitoring/). GitLab is a complex\nsystem built of various open source sub-systems, and we're monitoring all of them with Prometheus.\nYou can too.\n\n### Everyone should be using our GitLab.com dashboard\n\nAs I said earlier, in our intense, catastrophic scenario we gave the customer our GitLab.com\ndashboard. Any customer can use this dashboard as a template! You literally can go to [dashboards.gitlab.com](https://dashboards.gitlab.com), click \"export,\" get the dashboard, run your instance, then click \"import.\" It will show up, and\nyou just need to tweak the name so that it's not defaulting to our GitLab Production cluster.\nThen Prometheus just fills in the data.\n\n\u003Ciframe src=\"https://giphy.com/embed/12NUbkX6p4xOO4\" width=\"480\" height=\"440\" frameBorder=\"0\" class=\"giphy-embed\" allowFullScreen>\u003C/iframe>\n\nWe're trying to standardize around using the dashboards here, so that while there are differences\nand nuances in the deployments etc., we're speaking a common language, and have a common\nmeeting point for GitLab engineers across teams to monitor and talk GitLab performance.\n\n## Are you convinced about Prometheus yet?\n\nWe're now actively training our support team on Prometheus. And it's likely that other organizations\nprobably have the same thing happening – where another group could be impacting or helping,\nbut they're not collaborating, so they can't see where or how they can help one another. We've\nseen the light! 
So, we're training our team on Prometheus, and it's something that we want\nto make sure that everybody can make use of.\n\nMany customers think they don't need Prometheus and are reluctant to use it because it adds\noverhead; you have to configure it and set it up, and it may require a bit of finessing. GitLab\nis trying to make that even easier, but right now when you're building a bespoke deployment,\nit requires a bit of time, and you may not think time invested is worth it. And I'm here to say,\nit is, get it now! In fact, it's already there. You just need to turn it on! I'm advocating that all\nlarge, customer deployments over 500 users have Prometheus running by 2019. Turn it on and\nthen we'll all reap the rewards.\n",[677,9,1295],{"slug":6874,"featured":6,"template":680},"why-all-organizations-need-prometheus","content:en-us:blog:why-all-organizations-need-prometheus.yml","Why All Organizations Need Prometheus","en-us/blog/why-all-organizations-need-prometheus.yml","en-us/blog/why-all-organizations-need-prometheus",{"_path":6880,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6881,"content":6887,"config":6892,"_id":6894,"_type":14,"title":6895,"_source":16,"_file":6896,"_stem":6897,"_extension":19},"/en-us/blog/why-basic-security-practices-matter-for-everyone",{"title":6882,"description":6883,"ogTitle":6882,"ogDescription":6883,"noIndex":6,"ogImage":6884,"ogUrl":6885,"ogSiteName":667,"ogType":668,"canonicalUrls":6885,"schema":6886},"How information security practices help everyone","Security oversights can happen to anyone without the right practices in place. 
Read here on why security practices matter and what you should use.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670783/Blog/Hero%20Images/pexels-christina-morillo.jpg","https://about.gitlab.com/blog/why-basic-security-practices-matter-for-everyone","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How information security practices help everyone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2020-09-14\",\n      }",{"title":6882,"description":6883,"authors":6888,"heroImage":6884,"date":6889,"body":6890,"category":698,"tags":6891},[1010],"2020-09-14","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nWe asked GitLab sr. security engineer Andrew Kelly about the projects he’s working on, what he’s learned from mistakes he’s made in InfoSec and why writing unit tests for unexpected events is so important.\n\n![Andrew Kelly Headshot](https://about.gitlab.com/images/blogimages/akellycirc.png){: .small.right.wrap-text} **Name:** Andrew Kelly\n\n**Title:** Senior Security Engineer, Application Security\n\n**How long have you been at GitLab?**: I joined July 2019\n\n**GitLab handle:** [@ankelly](https://gitlab.com/ankelly)\n\n\n\n#### Tell us what you do here at GitLab:\nI work with GitLab teams and HackerOne reporters to ensure that GitLab products are secure. This includes conducting [application security reviews](/topics/devsecops/), verifying and determining the impact severity of vulnerabilities, collaborating with development and product teams on solutions, and a variety of other application security related tasks.\n\n#### What’s the most challenging or rewarding aspect of your role? \nI find that one of the most rewarding aspects of my role is working with our HackerOne reporters. 
The [GitLab bug bounty program](https://hackerone.com/gitlab) receives reports from all sorts of hackers all over the world and I really enjoy the process of investigating and triaging their findings. These reports describe potential vulnerabilities impacting a number of different GitLab applications and systems. Oftentimes, while recreating a vulnerability or investigating a report I end up learning something new about application security or GitLab itself.\n\n#### And, what are the top 2-3 initiatives you’re currently focused on? \nI’m the [application security stable counterpart](/handbook/security/security-engineering/application-security/stable-counterparts.html) for the [Growth](/handbook/engineering/development/growth/) and [Enablement](/handbook/engineering/development/enablement/) teams. Application Security stable counterparts collaborate with teams throughout the software development lifecycle to assess risk, review code, and otherwise help drive security-conscious outcomes. This has given me an opportunity to work with an amazing and talented group of people on a number of different projects and in much more depth than I might otherwise be able to.\n\nIn addition, I’ve recently been working to help get GitLab’s [Secure tooling](https://docs.gitlab.com/ee/user/application_security/) enabled in several of our major product repositories. This effort has involved coordination across teams, code review, and working with [CI/CD configurations](/topics/ci-cd/). This impacts a significant number of GitLab repositories and I’m excited for the amount of coverage this will provide. I’ve also been involved with configuring and enabling the [GitLab container scanning tools](https://docs.gitlab.com/ee/user/application_security/container_scanning/) to analyze certain docker images.\n\n#### What is the most significant piece of security advice you could provide to a colleague or friend? \nWrite unit tests that cover unexpected cases. 
Oftentimes when writing software we get focused on expected use cases and our specs tend to reflect that bias. In order to improve application security, I recommend taking the time to think about creative and malicious ways that user controlled input can be abused. This is worth the effort because you will inevitably find situations in which the code did not behave the way you expected and this will help you prevent some security problems before they get released.\n\n#### How did you get into security? \nI spent a lot of time on the internet growing up. My experiences online and the depiction of hackers in pop culture sparked my interest in security at a young age. It wasn’t until several years into my software development career that I realized it was something I was capable of doing and could do professionally. I started by learning about common web vulnerabilities and looking for them in the codebase that I contributed to as a developer. Over time I continued to learn and build my information security knowledge either on the job or in my free time. \n\n#### From the perspective of your role, what’s GitLab doing better than anyone else in terms of security? \nI’m hesitant to say that we’re doing this better than *anyone* else, but I’m very proud of GitLab’s commitment to transparency. Our teams are very committed to clear, open communication internally and with our customers and community members. This is especially true with regards to security -- transparency is baked into our procedures and processes and is always at the forefront of our minds. From a security perspective, this can be a tricky balancing act but I’m happy that it’s something we take very seriously. 
I think [Dominic Couture](/company/team/#dcouture) covered this well in a recent blog post ([“Security strengthened by iteration, and transparency\n”](https://about.gitlab.com/blog/security-strengthened-by-interation-and-transparency/)) that I recommend reading.\n\n#### What was your personal worst moment in the Infosec world and how did you recover? \nYears ago I wanted to build an application using a particular platform’s API, so I searched for and visited what I thought was the correct website and tried to log in. For some reason, my password didn’t work so I tried it a few more times and eventually gave up. Hours later I was alerted to an attempted sign-in to the real website from a location thousands of miles away. At this point, I realized the mistake I had made. In my haste I didn’t notice that the website I was putting my credentials into was actually using a [homograph](https://en.wikipedia.org/wiki/IDN_homograph_attack) to trick people whose attention is divided like mine was into giving up their passwords. Luckily I had two-factor authentication enabled and so my account was still protected, but I learned and reinforced some very important lessons that day:\n* Slow down and take the time to verify the address of the website you are visiting before entering any credentials\n* Better yet, bookmark important sites rather than searching for the website you’re looking to log into\n* Use a password manager and ensure you use unique passwords for each website\n* Enable multi-factor authentication everywhere that it is supported\n\nThese last two pieces of advice are something you’ll hear from many security professionals, including some of my coworkers -- like in this post, [\"The sky is not falling: tips to avoid the FUD and protect yourself online\"](/blog/the-sky-is-not-falling/).\n\n#### What would you like to see more of in the industry? \nI’d like to see vulnerability management integrated into the software development lifecycle across the tech industry. 
Organizations large and small are typically building applications bundled with dozens, if not hundreds, of dependencies and third-party libraries, all of which have the potential to become security concerns. Tooling and alerting has come a long way to help out, but it still requires organizational discipline and a not insignificant time investment. This can be a time-consuming task and often involves a lot of collaboration on behalf of the engineering and product teams, but is worth every bit of effort.\n\n## Now, for the questions you *really* want to have answered:\n\n#### VIM or EMACS?\nI believe that you have to choose the right tool for the job. It just so happens to usually be VIM, at least for me.\n\n#### What was the first computer you owned?\nThe first computer my family owned was a Commodore 64. My experiences playing video games on that and other early consumer computers paved the way for a lifetime of interest in technology.\n\n#### Show us your frequently used slack emoji list. What do you think this says about you?\nMy current 'Frequently Used' emojis indicate to me that I probably spend a little too much time in the #dog channel 😆. 
It also appears that emojis are my favorite way to say thank you.\n\n![Andrew Kelly's Emojis](https://about.gitlab.com/images/blogimages/akelly_slackmojis.png){: .shadow.small.center.wrap-text}\n\n\nPhoto by [Christina Morillo](https://www.pexels.com/@divinetechygirl) from [Pexels](https://www.pexels.com).\n{: .note}\n",[720,9],{"slug":6893,"featured":6,"template":680},"why-basic-security-practices-matter-for-everyone","content:en-us:blog:why-basic-security-practices-matter-for-everyone.yml","Why Basic Security Practices Matter For Everyone","en-us/blog/why-basic-security-practices-matter-for-everyone.yml","en-us/blog/why-basic-security-practices-matter-for-everyone",{"_path":6899,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6900,"content":6905,"config":6910,"_id":6912,"_type":14,"title":6913,"_source":16,"_file":6914,"_stem":6915,"_extension":19},"/en-us/blog/why-continuous-fuzzing",{"title":6901,"description":6902,"ogTitle":6901,"ogDescription":6902,"noIndex":6,"ogImage":690,"ogUrl":6903,"ogSiteName":667,"ogType":668,"canonicalUrls":6903,"schema":6904},"Why (Continuous) Fuzzing","Learn what fuzzing is, what's so good at fuzzing code continuously and why to do it here!","https://about.gitlab.com/blog/why-continuous-fuzzing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why (Continuous) Fuzzing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yevgeny Pats\"}],\n        \"datePublished\": \"2020-12-10\",\n      }",{"title":6901,"description":6902,"authors":6906,"heroImage":690,"date":6907,"body":6908,"category":698,"tags":6909},[2233],"2020-12-10","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\nIn this post we will focus on why continuous fuzzing is needed and what are the challenges in implementing continuous fuzzing.\nPrevious posts/papers regarding why fuzzing in general is important as well as why it’s a good idea even to integrate\nit as part of the Go 
toolchain can be found [here](https://docs.google.com/document/d/1N-12_6YBPpF9o4_Zys_E_ZQndmD06wQVAM_0y9nZUIE) (written by Dmitry Vyukov and Romain Baugue – highly recommended, the link talks about go but concepts can be applied to other languages).\n\n## Fuzzing Quick Recap?\n\nEssentially fuzzing consists of two types of jobs:\n\n- Fuzzing – A job that can run infinitely. This job automatically generates interesting test-cases that cover more paths, as well as monitors for crashes and other memory related problems.\n- Regression – Run the fuzzer through a set of specific test-cases. Usually these were generated by the previously mentioned long running jobs. Regression jobs are generally very short.\n\n\n## Continuous Fuzzing Challenges\n\nThere are a few challenges/questions that arise from how to integrate fuzzing to the current CI.\nWe will walk through some of them as there are a lot of other open questions that really depend on the development workflow and the specific project.\n\n**Challenge 1 –  Long Running Jobs**: Fuzzing is a long-running (infinite) job unlike a CI that we try to keep as short as possible to provide fast feedback for commits/MRs.\n\n**Solution 1 – Async Jobs**: This is where we need to spawn a different server or use a platform like GitLab to run the fuzzers [asynchronously](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#continuous-fuzzing-long-running-async-fuzzing-jobs).\nThe platform will notify the administrators, developers, or the relevant security people of any new vulnerabilities that the fuzzers find. In GitLab, these will be reported on our [Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/). This could take days or months of running continuously.\n\n\n**Challenge 2 – Many Targets * Many Versions = Lots of time and money**: Which versions should you fuzz? 
We need to decide wisely which versions to fuzz, as blindly fuzzing all possible versions in a project infinitely for many targets will cost a lot of money and compute resources.\n\n**Solution 2 – Master + Stable**: One approach that we saw popular with users is fuzzing the development branch (master) and release branch. The development branch is fuzzed continuously and the fuzzer is updated every time new code is pushed to master.\nThe updated fuzzers check the additional code but keep the corpus from previous runs.\nThis way the fuzzers can essentially always continue from where they stopped and only work on the additional code.\nGitLab helps with managing the corpus and keeping it in minimised state.\n\n**Challenge 3 – Learning from old mistakes**: Once we set up continuous fuzzing we aggregate very valuable test-cases and crashes that we fix along the way.\nWe would love to use all those precious test-cases to check every MR before it gets merged.\n\n**Solution 3 – Regression Fuzz Tests**: For every MR just like unit-test we run the fuzzers through all the generated test-cases and the fixed crashes which is usually a very quick process which fits a classic CI.\nGitLab helps running the fuzzers with the aggregated corpus from previous job, fail the CI and alert the developer immediately via the security dashboard.\nShort regression can also be combined with short fuzz tests runs that run inline with the CI to help find also new bugs in MRs.\n\n## Summary\n\nThis was a quick walkthrough of some of the challenges of integrating continuous fuzzing to projects from our experience.\n\nCheck out our [full documentation](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) and the [example repositories](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing)\nand try adding fuzz testing to your own 
repos!\n",[9,720,722],{"slug":6911,"featured":6,"template":680},"why-continuous-fuzzing","content:en-us:blog:why-continuous-fuzzing.yml","Why Continuous Fuzzing","en-us/blog/why-continuous-fuzzing.yml","en-us/blog/why-continuous-fuzzing",{"_path":6917,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6918,"content":6923,"config":6928,"_id":6930,"_type":14,"title":6931,"_source":16,"_file":6932,"_stem":6933,"_extension":19},"/en-us/blog/why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines",{"title":6919,"description":6920,"ogTitle":6919,"ogDescription":6920,"noIndex":6,"ogImage":690,"ogUrl":6921,"ogSiteName":667,"ogType":668,"canonicalUrls":6921,"schema":6922},"Why GitLab is building Meltano, an open source platform for ELT pipelines","Our goal is to make the power of data integration available to all by building a true open source alternative to existing proprietary hosted ELT solutions.","https://about.gitlab.com/blog/why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab is building Meltano, an open source platform for ELT pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Douwe Maan\"}],\n        \"datePublished\": \"2020-05-18\",\n      }",{"title":6919,"description":6920,"authors":6924,"heroImage":690,"date":5770,"body":6926,"category":698,"tags":6927},[6925],"Douwe Maan","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nThis post was originally [published on the Meltano blog](https://meltano.com/blog/why-we-are-building-an-open-source-platform-for-elt-pipelines/) on May 13, 2020.\n{: .alert .alert-info}\n\nThis post is part 2 of a 2-part series to announce and provide context on the new direction of [Meltano](https://meltano.com).\nIf you've been following Meltano for a while or would like to have some historical context, start with part 1: [Revisiting the 
Meltano strategy: a return to our roots](https://meltano.com/blog/revisiting-the-meltano-strategy-a-return-to-our-roots/).\nIf you're new to Meltano or are mostly interested in what's coming, feel free to skip part 1 and start here.\nIf you're worried that reading this entire post will take a lot of time, feel free to jump right to the conclusion: [Where Meltano fits in](#where-meltano-fits-in).\n{: .note}\n\n## Introduction\n\nIf you've read [part 1 of the series](https://meltano.com/blog/revisiting-the-meltano-strategy-a-return-to-our-roots/), you know that [Meltano](https://meltano.com) is now focused on building an **open source platform for data integration and transformation (ELT) pipelines**, and that we're very excited about it.\n\nBut why are we even building this?\n\nIsn't data integration (getting data from sources, like SaaS tools, to destinations, like data warehouses) a solved problem by now, with modern off-the-shelf tools having taken the industry by storm over the past few years, making it so that many (smaller) companies and data teams don't even need data engineers on staff anymore?\n\nOff-the-shelf ELT tools are not _that_ expensive, especially compared to other tools in the data stack, like Looker, and not having to worry about keeping your pipelines up and running or writing and maintaining data source connectors (extractors) is obviously extremely valuable to a business.\n\nOn top of that, writing and maintaining extractors can be tedious, thankless work, so why would anyone want to do this themselves instead of just paying a vendor to handle this burden instead?\n\nWho would ever want to use a self-managed ELT platform? 
And why would anyone think building this is a good use of time or money, _especially_ if it's going to be free and open source?\n\n---\n\nIn [part 1](https://meltano.com/blog/revisiting-the-meltano-strategy-a-return-to-our-roots/), I explained why we have concluded that in order to eventually realize our end-to-end vision for Meltano (a single tool for the entire data lifecycle, from data source to dashboard), we have to go all-in on positioning Meltano as an open source self-managed platform for running data integration and transformation (ELT) pipelines, and will turn Meltano into a true open source alternative to existing proprietary hosted solutions like [Alooma](https://www.alooma.com/), [Blendo](https://www.blendo.co/), [Hevo](https://hevodata.com/), [Matillion](https://www.matillion.com/products/etl-software/), [Pentaho](https://www.hitachivantara.com/en-us/products/data-management-analytics/pentaho-platform.html), and [Xplenty](https://www.xplenty.com/), in terms of ease of use, reliability, and quantity and quality of supported data sources.\n\nHowever, the points and questions raised above are totally valid, and were in fact raised by actual data engineers I've talked to over the past few weeks. While Meltano (and [GitLab](https://about.gitlab.com), which sponsors its development) have a need for the existence of such a tool, it's a separate matter entirely whether there are any data engineers or data teams out there who share that need.\n\nWould any data team actually consider joining the community, contributing to Meltano and its extractors and loaders, and eventually migrating to the open source tool, away from whatever proprietary solution they use today?\n\n## The problem: pay to play\n\nThe idea is that every data team in the world needs a data integration tool, because one way or another you have to get your data from your various sources into your data warehouse so that it can be analyzed. 
And since every company would be better off if they were analyzing their data and learning from their ups and downs, every company in the world needs a data integration tool whether they already realize it or not.\n\nSince there is currently no true open source alternative to the popular proprietary tools, the data space has effectively become \"pay to play\". There are many great open source analytics and business intelligence tools out there ([Superset](https://superset.incubator.apache.org/), [Metabase](https://www.metabase.com/), and [Redash](https://redash.io/) come to mind, and let's not forget that Meltano comes with built-in analytics functionality as well), but all assume that your data will somehow have already found its way into a data warehouse.\n\nIf for any reason at all you cannot use one of the hosted platforms, you are essentially out of luck and will not get to compete on a level playing field with those companies that can afford to integrate their data and start learning from it. Even if you have everything else going for you, you are massively disadvantaged from day one.\n\nPerhaps, you do not think of these off-the-shelf tools as particularly expensive, you're fine with your sensitive data flowing through a US company's servers, and you would happily pay for professional services if you ever need to extract data from a source that isn't supported already. \n\nHowever, many around the world will find prices US companies charge prohibitively expensive relative to their local income, may prefer (or be legally required) to have their data not leave their country or their servers, or may find that the locally grown SaaS services they use are often not supported by the existing US-centric vendors.\n\nAnd to be clear, US companies are not immune to these issues, even if they may be somewhat less affected by the financial argument. Think of HIPAA compliance, for example, which many (most? all?) 
hosted tools don't offer unless you sign up for one of their more expensive plans.\n\n**If you do not feel the pain of the current situation or see the need for change, recognize that your experience may not be representative.**\n\n### Data integration as a commodity\n\nThis perspective leads me to an argument with an ideological angle, that is particularly compelling to me because of the many parallels I see with the early days of [GitLab](https://about.gitlab.com/): the open source project that was [founded in Ukraine back in 2011](https://about.gitlab.com/company/history/) with the goal of building a self-managed alternative to the likes of [GitHub](https://github.com/) and [Bitbucket](https://bitbucket.org/), that a few years later became an open core product maintained primarily by the newly founded company that shares its name. To this day, GitLab comes in open source and proprietary flavors, and the functionality included in the Community Edition continues to be sufficient for hundreds of thousands of organizations around the world, that would otherwise have needed to opt for a paid, proprietary alternative. As GitLab is sponsoring the development of Meltano, these parallels are not a coincidence.\n\nSince an ELT platform is a tool every data engineer and every company needs if they want to have the best chance of survival and success, I would argue that it should be a commodity and should be available at a reasonable cost to everyone who wants or needs it. 
Anything less than that hurts a significant number of companies in their ability to reach their true potential and serve their users and customers as well as they would want to, thereby stifling innovation and competition, and we all end up paying the price because we have to deal with companies and products that are less optimized and improved than they could be.\n\nThe obvious question: if this is apparently such a problem, why haven't tons of competitors popped up already to serve these local markets or inject some competition into the US market? Orchestrating reliable data pipelines _is_ a solved problem, even in the open source space, where great tools like [Airflow](https://airflow.apache.org/) and [Luigi](https://github.com/spotify/luigi) exist and are running in production at thousands of organizations. That's not to say they're as easy to configure and get started with as the hosted platforms we're talking about, but the technology is there, assuming you have an extractor and loader to plug in.\n\nAnd I think that assumption is at the core of the issue, and at the core of the economic moat that the existing vendors have created around themselves, that makes it hard for new parties to enter the market and compete: the impressive amount of data sources they support out of the box, and their massive (in-house or outsourced) teams that have spent and continue to spend thousands of hours developing and maintaining these extractors and loaders.\n\nIf you've read [part 1](https://meltano.com/blog/revisiting-the-meltano-strategy-a-return-to-our-roots/) of this 2-part series, you'll remember that we ran into this ourselves when we offered a hosted version of Meltano's data connection and analytics interface to non-technical end-users. 
They could go straight from connecting their data source to viewing a dashboard, but only if we had written the extractor, loader, transformations, and models for that data source beforehand, and if we would continue to maintain these forever. We realized that this wasn't going to scale, and so would most companies that would decide to just write and maintain their own extractors instead of paying someone else to do it: it's a lot of work, and **it never ends**.\n\n## The solution: open source\n\nUltimately, though, the size of the economic moat that exists around these vendors can be measured in terms of developer hours, and there's no secret sauce or intellectual property that separates the current major players from anyone else out there who has their own hours to bring to the table.\n\nBy yourself, as a single company or data engineer, implementing and maintaining extractors for all of the data sources you need to integrate is not feasible, which is why most don't.\n\nTogether, though, that changes. With a big enough group of people capable of programming and motivated to collaborate on the development and maintenance of extractors and loaders, it's just a matter of time (and continued investment of time by a subset of the community) before every proprietary extractor or loader has an open source equivalent. 
The maintenance burden of keeping up with API and schema changes is not insignificant, but if open source communities can manage to maintain language-specific API client libraries for most SaaS APIs out there, there's no reason to think we'd have a harder time maintaining these extractors.\n\nAssuming there is no secret sauce or key intellectual property involved, **a sufficiently large and motivated group of people capable of programming can effectively will any new tool into existence**: that is the power of open source.\n\nThe more common the data source, the more people will want it, the faster it'll be implemented, the more heavily it'll be tested, and the more actively it'll be maintained. It doesn't need to take long before the segment of the market that only uses these common data sources will be able to swap out their current data integration solution for this open source alternative. It's not an all-or-nothing matter either: data teams can move their data pipelines over on a pipeline-by-pipeline basis, as extractors become available and reach the required level of quality.\n\nOf course, a self-managed platform for running data integration pipelines wouldn't just need to support a ton of extractors and loaders. You would also want to be confident that you can run it in production and get the same reliability and monitoring capabilities you get with the hosted vendors. Fortunately, this is where we can leverage an existing open source tool like Airflow or Luigi, that this hypothetical self-managed platform could be built around.\n\n### Everyone wins\n\nEven if you're not personally interested in ever using a self-managed data integration platform, you may benefit from us building one anyway.\n\nOpen source is the most promising strategy available today to increase competition in the data integration and data pipeline space. 
Even if the specific tool we're building doesn't actually become the Next Big Thing, the market will benefit from that increased competition.\n\nDevelopers of new SaaS tools and data warehouse technology would also benefit from an open source standard for extractors and loaders. Rather than wait (or pay) for data integration vendors to eventually implement support for their tool once it reaches a high enough profile or once its users start begging (or paying) the vendor loudly enough, new tools could hit the ground running by writing their own integrations. Today, many companies wouldn't consider switching to a new SaaS tool that isn't supported by their data integration vendor at all, putting these tools at a significant competitive disadvantage against their more mature and well-connected competitors.\n\nThe only ones who have something to lose here are the current reigning champions. For everyone else it's a win-win, whether you actually contribute to or use Meltano, or not. If you don't believe me, just look at the [DevOps](/topics/devops/) space and the impact that GitLab has had on the industry and the strategy and offering of the previously dominant players, GitHub and Bitbucket.\n\nIf an industry has effectively become \"pay to play\" because every software engineer in that industry needs to use one of a handful of paid tools in order to get anything done at all, there is a massive opportunity for an open source alternative \"for the people, by the people\" to level the playing field, and disrupt the established players from the bottom on up.\n\nOf course, GitLab is not just interested in sponsoring the development of such an open source project out of the goodness of its heart. 
The hope is that eventually, a business opportunity will arise out of this project and its community and ecosystem, because even if a truly competitive free and open source self-managed option is available, there will always be companies that would still prefer a hosted version with great support and enterprise features, who won't mind paying for it.\n\nBut for everyone else, **there will always be a Community Edition, and data integration will forever be a commodity rather than pay to play**.\n\n## The Singer specification\n\nOf course, we are not the first to be intrigued by the concept of open source data integration. Most significantly, [Stitch](https://www.stitchdata.com/) has developed the [Singer specification](https://www.singer.io/), which they describe as follows:\n\n> Singer describes how data extraction scripts—called “taps” —and data loading scripts—called “targets”— should communicate, allowing them to be used in any combination to move data from any source to any destination. Send data between databases, web APIs, files, queues, and just about anything else you can think of.\n\nThere's a [Getting Started guide](https://github.com/singer-io/getting-started/) on how to develop and run taps and targets (extractors and loaders), many dozens of them have already been written for wide range of data sources, warehouses and file formats, a good amount of them are actively maintained and being used in production by various organizations, and the [Singer community on Slack](https://singer-slackin.herokuapp.com/) has over 2,100 members, with new people joining every day.\n\nOnce you've written (or installed) a tap and target, you can pipe them together on the command line (`tap | target`) and see your data flow from source to destination, which you can imagine is quite satisfying.\n\nOnce you've hit that milestone, though, the next step is not quite so obvious. How do I actually build a data pipeline out of this that I can run in production? 
Is there a recommended deployment or orchestration story? How do I manage my pipeline configuration and state? How do I keep track of the metrics some taps output, and how do I monitor the whole setup so that it doesn't fall flat on its face while I'm not looking?\n\nUnfortunately, the Singer specification and website don't touch on this. A number of tools have come out of the Singer community that make it easier to run taps and targets together ([PipelineWise](https://transferwise.github.io/pipelinewise/), [singer-runner](https://github.com/datamill-co/singer-runner), [tapdance](https://github.com/aaronsteers/tapdance), and [knots](https://github.com/singer-io/knots), to list a few), and some of these are successfully being used in production, but getting to that point still requires one to figure out and implement a deployment and orchestration strategy, and those who have managed to do so effectively have all needed to reinvent the wheel.\n\nThis means that while open source extractors and loaders do exist, as does a community dedicated to building and maintaining them, what's missing is the open source tooling and documentation around actually deploying and using them in production.\n\n### The missing ingredients\n\nIf this tooling did exist and if Singer-based data integration pipelines were truly easy to deploy onto any server or cloud, the Singer ecosystem immediately becomes a lot more interesting. Anyone would be able to spin up their own [Alooma](https://www.alooma.com/)/[Blendo](https://www.blendo.co/)/[Hevo](https://hevodata.com/)/[Matillion](https://www.matillion.com/products/etl-software/)/[Pentaho](https://www.hitachivantara.com/en-us/products/data-management-analytics/pentaho-platform.html)/[Xplenty](https://www.xplenty.com/)-alternative, self-managed and ready to go with a wide range of supported data sources and warehouses. 
Existing taps and targets would get more usage, more feedback, and more contributions, even if many prospective users may still end up opting for one of the proprietary alternatives in the end.\n\nMany people who come across the Singer ecosystem today end up giving up because they can't see a clear path towards actually using these tools in production, even if taps and targets already exist for all of the sources and destinations they're interested in. You have to be particularly determined to see it through and not just opt for one of the hosted alternatives, so the majority of people developing taps and targets and running them in production today are those for whom _not_ self-hosting was never really an option. Any amount of better tooling and documentation will cause people to take the Singer ecosystem more seriously as an open source data integration solution, and convince a couple more people to give it a try, who would have long given up today.\n\nDeveloping taps and targets is also not as easy as it could be. The Getting Started guide and [singer-tools](https://github.com/singer-io/singer-tools) toolset are a great start, and implementing a basic tap is pretty straightforward, but building one you would actually be comfortable running in production is still a daunting task. The existing taps can serve as examples, but they are not implemented consistently and don't all implement the full range of Singer features. The [singer-python](https://github.com/singer-io/singer-python) library contains utility functions for some of the most common tasks, but taps end up reimplementing a lot of the same boilerplate behavior anyway. 
Moreover, a testing framework or recommended strategy does not exist, meaning that users may not find out that a small inconspicuous change broke their extractor or loader until they see their entire data pipeline fail.\n\nAll in all, the Singer ecosystem has a ton of potential but suffers from a high barrier to entry, that negatively affects the experience of those who want to use existing taps and targets, as well as those potentially interested in developing new ones.\n\nOver the past few weeks, I've spent many hours talking to various members of the Singer community who _have_ been able to get their Singer-based pipelines running in production, and the observations above are informed by their perspectives and experience. Unanimously, they agreed that the Singer ecosystem is not currently living up to its potential, that change is needed, and that better tooling and documentation around deployment and development would go a long way.\n\n## Where Meltano fits in\n\nAs I'm sure you've pieced together by now, [Meltano](https://meltano.com/) intends to be that tooling and bring that change.\n\nOur goal is to **make the power of data integration available to all** by turning Meltano into a **true open source alternative to existing proprietary hosted ELT solutions**, in terms of ease of use, reliability, and quantity and quality of supported data sources.\n\nLuckily, we're not starting from zero: Meltano already speaks the Singer language and [uses taps and targets for its extractors and loaders](https://meltano.com/#integration). Its support goes beyond simply piping two commands together, as it also manages [configuration](https://meltano.com/docs/command-line-interface.html#config), [entity selection](https://meltano.com/docs/command-line-interface.html#select) and [extractor state](https://github.com/singer-io/getting-started/blob/master/docs/CONFIG_AND_STATE.md#state-file) for you. 
It also makes it super easy to [set up pipeline schedules](https://meltano.com/#orchestration) that can be run on top of a supported orchestrator like [Airflow](https://airflow.apache.org/).\n\nAdditionally, Meltano supports [dbt](https://www.getdbt.com/)-based [transformation as part of every ELT pipeline](https://meltano.com/#transformation), and comes with a basic web interface for [data source connection and pipeline management](https://meltano.com/docs/analysis.html#connect-data-sources) and [point-and-click analytics and report and dashboard creation](https://meltano.com/docs/analysis.html#explore-your-data), enabling you to go from data to dashboard using a single tool, that you can [run locally or host on any cloud](https://meltano.com/docs/installation.html).\n\nFor the foreseeable future, though, our focus will primarily be on [data integration](https://meltano.com/#integration), not transformation or analysis.\n\nWhile we've come a long way already, there's still plenty of work to be done on the fronts of ease of use, reliability, and quantity and quality of supported data sources, and we can't afford to get distracted.\n\n### Let's get to work!\n\nIf any of the above has resonated with you, or perhaps even inspired you, we'd love your help in realizing this vision for Meltano, the Singer ecosystem, and the data integration space in general. We literally won't be able to do it without you.\n\nBefore anything else, you'll want to see what Meltano can already do today by following the [examples on the homepage](https://meltano.com/). 
They can be copy-pasted right onto your command line, and in a matter of minutes will take you all the way through [installation](https://meltano.com/#installation), [integration](https://meltano.com/#integration), [transformation](https://meltano.com/#transformation), and [orchestration](https://meltano.com/#orchestration) with the [`tap-gitlab` extractor](https://meltano.com/plugins/extractors/gitlab.html) and [`target-jsonl`](https://meltano.com/plugins/loaders/jsonl.html) and [`target-postgres`](https://meltano.com/plugins/loaders/postgres.html) loaders.\n\nOnce you've got that working, you'll probably want to try Meltano with a different, more realistic data source and destination combination, which will require you to add a new [extractor](https://meltano.com/plugins/extractors/) ([Singer tap](https://www.singer.io/#taps)) and/or [loader](https://meltano.com/plugins/loaders/) ([Singer target](https://www.singer.io/#targets)) to your Meltano project. To learn how to do this, the homepage once again [has got you covered](https://meltano.com/#meltano-add).\n\nAnd that's about as far as you'll be able to get right now, with Meltano's existing tooling and documentation. Running a Meltano pipeline locally (with or without Airflow) is one thing, but actually deploying one to production is another. As we've identified, this is one of the places where the Singer ecosystem and documentation currently fall short, and for the moment, [Meltano is no different](https://gitlab.com/groups/meltano/-/epics/79).\n\nFor this reason, the first people we would love to get involved with the Meltano project are **those who are already part of the Singer community**, and in particular **those who have already managed to get Singer-based ELT pipelines running in production**. 
We want to make it so that all future Singer community members and Meltano users will be able to accomplish what they did, and no one knows better what that will take (and how close or far off Meltano currently is) than they do.\n\nIf you're one of these people, or simply anyone with similarly relevant feedback, ideas, or experience, we'd love it if you would:\n\n- [give Meltano a try](https://meltano.com/) and compare it to the tools you are using today,\n- [join us on Slack](https://join.slack.com/t/meltano/shared_invite/zt-cz7s15aq-HXREGBo8Vnu4hEw1pydoRw) to receive (and provide) community support,\n- [follow us on Twitter](https://twitter.com/meltanodata) to stay up to date on new releases and other developments,\n- [file new issues on GitLab.com](https://gitlab.com/meltano/meltano/-/issues/new) for any ideas you have or bugs you run into,\n- [participate in existing issues](https://gitlab.com/meltano/meltano/-/issues) that may benefit from your perspective,\n- [check out the Python codebase](https://gitlab.com/meltano/meltano) if you're curious, and _last but not least_:\n- **consider [contributing to Meltano](https://meltano.com/#contributing), its [documentation](https://meltano.com/docs/), and its [extractors](https://meltano.com/plugins/extractors/) and [loaders](https://meltano.com/plugins/loaders/)**, so that your, our, and everyone else's hopes and dreams for Meltano may actually come true.\n\nI can't wait to see what we'll be able to accomplish together.\n\nSee you soon on [Slack](https://join.slack.com/t/meltano/shared_invite/zt-cz7s15aq-HXREGBo8Vnu4hEw1pydoRw) or [GitLab.com](https://gitlab.com/meltano/meltano)!\n",[745,9],{"slug":6929,"featured":6,"template":680},"why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines","content:en-us:blog:why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines.yml","Why Gitlab Is Building Meltano An Open Source Platform For Elt 
Pipelines","en-us/blog/why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines.yml","en-us/blog/why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines",{"_path":6935,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6936,"content":6942,"config":6947,"_id":6949,"_type":14,"title":6950,"_source":16,"_file":6951,"_stem":6952,"_extension":19},"/en-us/blog/why-gitlab-uses-a-monthly-release-cycle",{"title":6937,"description":6938,"ogTitle":6937,"ogDescription":6938,"noIndex":6,"ogImage":6939,"ogUrl":6940,"ogSiteName":667,"ogType":668,"canonicalUrls":6940,"schema":6941},"How we maintain product velocity with a monthly release cycle","This workplace has gone 85 months without missing a release.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678822/Blog/Hero%20Images/monthlyrelease.jpg","https://about.gitlab.com/blog/why-gitlab-uses-a-monthly-release-cycle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we maintain product velocity with a monthly release cycle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-11-21\",\n      }",{"title":6937,"description":6938,"authors":6943,"heroImage":6939,"date":6944,"body":6945,"category":299,"tags":6946},[930],"2018-11-21","\nWe eagerly await the 22nd of each month, because we get to share the hard work of our team and community\nwith our users. Our [release](/releases/) schedule illustrates our belief in the\n[importance of velocity](/handbook/engineering/development/principles/#the-importance-of-velocity) and our\ncommitment to delivering value in the form of features. 
Monthly releases might\nseem aggressive to outsiders, but we believe our ability to deliver\nfeatures quickly is a competitive advantage that has helped us find success.\n\n## Three cheers for a monthly release cycle!\n\nA monthly release cycle is great for users, because it adds value at a\npredictable pace. Most of GitLab's installations are self managed, helping users\nidentify when new functionality will arrive. As a user, you can look forward to the 22nd of\nevery month, knowing that's when new features become available.\n\n![A screenshot of a user's comment, which says that he is looking forward to February 22 more than 21, which is his birthday](https://about.gitlab.com/images/blogimages/releasebirthday.png){: .shadow}\n\n### A predictable schedule helps with planning team capacity\n\nThe team has also found that it's easier to determine how much capacity we have\nwhen we're planning our workload, because there's no need to agree on\ndeadlines. GitLab team-members know exactly how many days they have to work on something,\nand we can plan in advance. Working with such a short timeframe forces us to\nbuild features as small as possible. We can't say, \"I need two releases for this,\" so we have to really think about how we\ncan make a feature smaller, an idea that's reinforced by our\n[iteration value](https://handbook.gitlab.com/handbook/values/#iteration).\n\n> \"Sometimes, team members will propose having longer release cycles, because\nwe'd be able to fit more into it, or we'd be able to do better quality control,\nbut I think that's short sighted. It's true that in a longer release cycle, you\ncan fit more in, but that will require bigger steps, and we know that\niteration is what makes us achieve velocity. So, the smaller the steps we take,\nthe faster we can go and the easier it is to do quality control. 
We think a\nmonthly release cycle is the optimal frequency.\" -- [Sid Sijbrandij](/company/team/#sytses), GitLab CEO\n\n## Challenges posed by monthly releases\n\n### It's not always easy to stick to the plan\n\nIt's not all sunshine and rainbows. While we're strong proponents of the monthly release, we realize that there are\nquite a few challenges. A few of the drawbacks relate to our actual deadline\nand staying on track with the [direction](/direction/) of GitLab. For example,\na release might be far away, but we want to ship something now. Or, we want to\nship something, but the release just passed, and now we have to wait. Sometimes\nwe try to cram in something just before the merge window closes even\nthough it's not ready. When these situations arise, we remember to\n[shift objectives](/blog/why-we-shift-objectives-and-not-release-dates-at-gitlab/)\nand focus on providing value to our users.\n\n### There's a big impact on morale\n\nOne of the most bittersweet aspects of a monthly release cycle is that morale can soar\nor suffer, depending on the processes that are put in place. We're thrilled\nwhen we make something and quickly see the benefits. With our schedule, GitLab team-members see\ntheir hard work in action within a month, which is way better than\nin some enterprise organizations in which someone might make something that won't\nget released for six months. 
At GitLab, people can rapidly effect change with\ntheir contributions and feel like their work matters.\n\nThere were times in the past, however, when our release schedule dampened morale.\nIn the early days, we had a few times when things came down to the wire, and we\nbattled bugs right until the end, making the 22nd look like an impossibility.\nThe rush towards the end caused quite a bit of stress on the team, so we decided\nto create a bigger buffer between the release and the closing of our merge window.\nWe also developed a\n[release process](https://gitlab.com/gitlab-org/release/docs/blob/master/README.md)\nto help keep us on track. Our\n[release template](https://gitlab.com/gitlab-org/release-tools/blob/master/templates/monthly.md.erb)\nhas helped us identify problems earlier so that we're not feeling pressured at\nthe end, which could be detrimental to everyone's motivation.\n\n## How to determine the right cadence\n\nWhile we've found success with monthly releases, we understand that it's not\nright for everyone. If you're a SaaS provider, you don't need releases and can\njust ship whenever something is finished. If you're shipping software that people\nmanage themselves, you're going to have to do versioning, so that people know\nwhat version they're on and are aware of releases, patches, and upgrades. A\nmonthly release cycle forces you into this, so in that case, this cadence would\nwork well for your team. If there's hardware in the loop, extensive testing, or\nhuman testing that's needed, then a longer release cadence is required, because\nyou have to factor in significant fixed costs.\n\nIdentifying the right cadence for your organization comes down to experimentation\nand what your users will accept. Some user bases may only want to update every\nquarter or twice a year, while others want more frequent versions to stay current\nwith industry changes. 
Because we strongly believe that smaller steps let you\ndeliver faster, we encourage you to start with monthly releases and work towards\nsettling on the pacing that works best. [Collaborate](https://handbook.gitlab.com/handbook/values/#collaboration)\nwith your team to create processes that simplify actions and hold\n[retrospectives](/handbook/engineering/management/group-retrospectives/) to make\nadjustments. We've found that our\n[retrospectives](/blog/our-retrospective-and-kickoff-are-public/) are\nextremely helpful in identifying consistent problem areas.\n\n[Cover image](https://www.pexels.com/photo/22-apartment-architecture-building-210790/) licensed under [CC 0](https://www.pexels.com/creative-commons-images/)\n{: .note}\n",[722,9],{"slug":6948,"featured":6,"template":680},"why-gitlab-uses-a-monthly-release-cycle","content:en-us:blog:why-gitlab-uses-a-monthly-release-cycle.yml","Why Gitlab Uses A Monthly Release Cycle","en-us/blog/why-gitlab-uses-a-monthly-release-cycle.yml","en-us/blog/why-gitlab-uses-a-monthly-release-cycle",{"_path":6954,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6955,"content":6960,"config":6965,"_id":6967,"_type":14,"title":6968,"_source":16,"_file":6969,"_stem":6970,"_extension":19},"/en-us/blog/why-its-crucial-to-break-things-down-into-smallest-iterations",{"title":6956,"description":6957,"ogTitle":6956,"ogDescription":6957,"noIndex":6,"ogImage":1452,"ogUrl":6958,"ogSiteName":667,"ogType":668,"canonicalUrls":6958,"schema":6959},"Why iterative software development is critical","How we learned from our mistakes and adopted an iterative software development mentality to reduce the likelihood of shipping something that doesn't add value.","https://about.gitlab.com/blog/why-its-crucial-to-break-things-down-into-smallest-iterations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why iterative software development is critical\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2021-04-30\",\n      }",{"title":6956,"description":6957,"authors":6961,"heroImage":1452,"date":6962,"body":6963,"category":743,"tags":6964},[1897],"2021-04-30","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-05-05.\n{: .note .alert-info .text-center}\n\nIn a previous blog post called [Small experiments, significant results](/blog/small-experiments-significant-results-and-learnings/) I shared our recent success with conducting small experiments, but, in reality, we didn't start with the most iterative software development approach. It was the Growth team's early failures to iterate that helped us embrace launching smaller experiments with measurable results.\n\nWhen the [Growth team](/handbook/engineering/development/growth/) formed at GitLab in late 2019, we had little experience with designing, implementing, and shipping experiments intended to accelerate the growth of our user base. We hired experienced people but it was still hard to predict [how long it would take to implement and ship an experiment](/handbook/engineering/development/growth/#running-experiments). The \"Suggest a pipeline\" experiment was the first one I worked on with the Growth:Expansion team. The idea was simple: Guide users through our UI to help them set up a [CI/CD pipeline](/blog/guide-to-ci-cd-pipelines/).\n\n![The guided tour entry](https://about.gitlab.com/images/blogimages/smallest-iterations/suggest.png)\nThe first iteration of the \"suggest a pipeline\" guided tour.\n{: .note.text}\n\n[See the original prototype of the \"suggest a pipeline\" guided tour.](https://www.sketch.com/s/1794d37d-c722-4d32-862e-9c6c5d831149/a/zn1Z9o/play)\n\nThe guided tour would start on the merge request page and ask the user if they want to learn how to set up a CI/CD pipeline. 
Those who opted in would be led through the three steps required to complete the setup. The team saw this as a simple three-step guide, so we committed ourselves to ship it without first considering if it was the smallest experiment we could complete. We wanted to create a guided tour because it hadn't been done yet at GitLab, but in the end, this wasn't the most iterative software development approach. Today, our thinking is: \"What's the smallest thing we can test and learn from?\"\n\nOne of GitLab's company values is [iteration](https://handbook.gitlab.com/handbook/values/#iteration) which means that we strive to do *the smallest thing possible and get it out as quickly as possible*. The concept of [MVC (minimal viable change)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) guides this philosophy:\n\n> We encourage MVCs to be as small as possible. Always look to make the quickest change possible to improve the user's outcome.\n\nWhile looking back, I realized we failed to embrace the MVC with the \"suggest a pipeline\" experiment, but I'm grateful for that mistake because it provided us with one of the most valuable lessons: Always strive to complete the smallest viable change first. The idea of iterative software development is valuable even, or maybe especially, with experiments.\n\nBelow are five reasons why it's important to break development down. 
Small iterations:\n\n- Gets value to the user faster.\n- Decreases the risk of shipping something that doesn't add value.\n- Are easier to isolate and understand the impact of the changes.\n- Ship faster so the team starts learning sooner.\n- Allow teams to begin thinking about further iterations sooner or decide to abandon the experiment earlier (saving both time and resources).\n\n![Small vs large iterations](https://about.gitlab.com/images/blogimages/smallest-iterations/chart.jpg)\nThe power of iterative software development is clear by the two workflows.\n{: .note.text}\n\n\nIn the \"non-experimental work\" figure above, team one shipped a smaller iteration quickly and updated it twice, while team two only shipped one large iteration in the same time. Team one learned from their first small iteration and adapted their solution twice in the time team two shipped a larger iteration. It took team two longer to ship the large iteration and they sacrificed earlier findings they could have used to optimize their solution.\n\nIn the \"experimental work\" figure, team one shipped a smaller first iteration and reviewed early results, which helped them make an evidence-based decision as to iterate further on their first idea, or abandon it and move on to a new idea. Through this iterative software development process, they could either ship three iterations of their first idea or abandon it and start working on the first iteration of idea two. Team one could accomplish all this development in the same amount of time it took team 2 to ship a larger first iteration of idea one. Team one is much more likely to come to successful results and learnings faster than team two.\n\n## How the \"suggest a pipeline\" experiment _should_ have been done\n\nIt's easy to reflect on our project today and see what we did wrong, but such reflection allows us to avoid repeating mistakes. 
The GitLab guided tour looked like a simple experiment to build and ship, but in the end it wasn't and took months to complete. Overall, the experiment was successful, but after it was implemented we took a second look and saw the project could be improved. We decided to implement some improvements by iterating on the copy in our first nudge to users to encourage more users to opt-in. Had we shipped a smaller experiment sooner, we could have iterated earlier and delivered an optimal version of the first nudge, allowing more users to benefit from the guided tour.\n\n![Had we shipped a smaller iteration, we would have improved the copy of our opt-in nudge to users sooner.](https://about.gitlab.com/images/blogimages/smallest-iterations/copy-changes.jpg)\nThe second iteration of our opt-in copy is much stronger. Shipping a smaller iteration would have encouraged more users to opt-in to our experimental \"guided tour\" feature.\n{: .note.text}\n\nBecause it took us months to complete the implementation of the experiment, it also took us months to iterate on it.\n\nIf I had to do a similar experiment now, I'd start much smaller, with something that could be built and shipped in less than a month, ideally even faster. For example, we could have shipped an iteration with that first nudge linking to an existing source that explains how to set up a pipeline. That would have enabled us to validate the placement of the nudge, its content, and its effectiveness. It would have significantly reduced the risk of the experiment.\n\nOr maybe we could have [shortened the guided tour to be just two steps](https://gitlab.com/gitlab-org/growth/product/-/issues/1662/), which is exactly what [Kevin Comoli](/company/team/#kcomoli), product designer on Growth: Conversion, did. But because our idea already seemed like a small iteration, we never felt the urgency to reduce it further. 
So here's another reason why it's important to really think about the smallest possible iteration first: you can never be sure that what you're aiming to do will actually be as quick and simple as expected. So even when you think that your idea is the smallest possible iteration, *think again*.\n\n## How we're applying lessons on iteration to future experiments\n\nWhen I started working on the [\"invite members\" experiment](/blog/small-experiments-significant-results-and-learnings/), my vision of how the experience should be was more complex than the \"suggest a pipeline\" guided tour experience. The idea behind the \"invite members\" experiment was that any user could invite their team members to a project and an admin user would have to approve the invitation. But because of our learnings from the pipeline tour we decided to simplify the first experiment. Instead of designing and building a whole experience, we decided to use a [painted door test](https://crstanier.medium.com/a-product-managers-guide-to-painted-door-tests-a1a5de33b473), which essentially means we are focusing on tracking the main call-to-action to gauge user interest. For the \"invite members\" experiment, the painted door test involved displaying an invite link that, once clicked, displayed a message to users that the feature wasn't ready and suggested a temporary solution. This allowed us to validate the riskiest part of the experiment: Do non-admin users even _want_ to invite their colleagues?\n\n![Modal showing \"invite members\" feature isn't ready yet](https://about.gitlab.com/images/blogimages/smallest-iterations/modal-not-ready.png)\nThe \"invite members\" painted door experiment involved displaying a modal showing that the feature wasn't ready yet, but helped us still gauge user interest in the feature before investing resources in developing the feature.\n{: .note.text}\n\n## Why iterative software development matters\n\nWe were lucky with the \"suggest a pipeline\" experiment. 
It was the first experiment we worked on, and it was \"low hanging fruit\", meaning it was a solution that required limited investment but still delivered big returns, which made the chance of failure lower. As we move away from obvious improvements and start exploring riskier experiments, we won't be able to rely on luck. We need to be diligent about iteration and break things down into MVCs and smaller experiments to reduce the risk of investing development time on projects that don't add value to the user experience, or fail to have a positive impact on GitLab's growth.\n\nPhoto by [Markus Spiske](https://unsplash.com/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/pieces?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1152,9,700],{"slug":6966,"featured":6,"template":680},"why-its-crucial-to-break-things-down-into-smallest-iterations","content:en-us:blog:why-its-crucial-to-break-things-down-into-smallest-iterations.yml","Why Its Crucial To Break Things Down Into Smallest Iterations","en-us/blog/why-its-crucial-to-break-things-down-into-smallest-iterations.yml","en-us/blog/why-its-crucial-to-break-things-down-into-smallest-iterations",{"_path":6972,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6973,"content":6979,"config":6984,"_id":6986,"_type":14,"title":6987,"_source":16,"_file":6988,"_stem":6989,"_extension":19},"/en-us/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale",{"title":6974,"description":6975,"ogTitle":6974,"ogDescription":6975,"noIndex":6,"ogImage":6976,"ogUrl":6977,"ogSiteName":667,"ogType":668,"canonicalUrls":6977,"schema":6978},"The next step in performance testing? 
The GitLab Environment Toolkit","Learn how we're building a new toolkit to help with performance testing and deploying GitLab at scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682030/Blog/Hero%20Images/gitlab_environment_toolkit_scale.jpg","https://about.gitlab.com/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The next step in performance testing? The GitLab Environment Toolkit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grant Young\"}],\n        \"datePublished\": \"2021-06-15\",\n      }",{"title":6974,"description":6975,"authors":6980,"heroImage":6976,"date":6981,"body":6982,"category":743,"tags":6983},[3908],"2021-06-15","\n\nLast year I wrote about how the [Quality Engineering Enablement team](/handbook/engineering/quality/) was [building up the performance testing of GitLab](/blog/how-were-building-up-performance-testing-of-gitlab/) with the [GitLab Performance Tool (GPT)](https://gitlab.com/gitlab-org/quality/performance). Last year, the biggest challenge with performance testing wasn't so much the testing but rather setting up the right large scale GitLab environments to test against.\n\nLike any server application, deploying at scale is challenging. That's why we built another toolkit that automates the deployment of GitLab at scale: The [GitLab Environment Toolkit (GET)](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit).\n\n![GitLab Environment Toolkit logo](https://about.gitlab.com/images/blogimages/gitlab-environment-toolkit/gitlab_environment_toolkit_logo.png){: .center}\nGitLab Environment Toolkit logo\n{: .note.text-center}\n\nInternally called the \"Performance Environment Builder\" (PEB), GET grew alongside GPT as we continued to expand our performance testing efforts. 
Over time we built a toolkit that was quite capable in its own right of deploying GitLab at scale, which is why it started to gain attention internally from other teams and then even from some customers. Soon we realized we built something worth sharing.\n\nThe Quality Engineering Enablement team has been working hard over the last few months to polish the toolkit for broader use and we're happy to share that the first version of [GET v1.0.0](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/releases/v1.0.0) has been released!\n\nGET is a collection of well-known open source provisioning and configuration tools with a simple focused purpose - to deploy [GitLab Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab) and [GitLab Helm Charts](https://docs.gitlab.com/charts/) at scale, as defined by our [Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures) and [Cloud Native Hybrid Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative). Built with Terraform and Ansible, GET supports the provisioning and configuring of machines and other related infrastructure and contains the following features:\n\n - Support for deploying all GitLab Reference Architectures sizes dynamically from 1000 to 50,000\n - Support for deploying Cloud Native Hybrid Reference Architectures (GCP only at this time)\n - GCP, AWS, and Azure cloud provider support\n - Upgrades\n - Release and nightly Omnibus builds support\n - Advanced search with Elasticsearch\n - Geo support\n - Zero Downtime Upgrades support\n - Built-in load balancing via HAProxy and monitoring (Prometheus, Grafana) support\n\nWe're just getting started with GET, and [continue to add more support for features and different environment setups](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/boards?group_by=epic). 
Now that GET [v1.0.0](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/releases/v1.0.0) has been released, we're at a good place for customers to start trialing and evaluating GET. We do ask that you take into consideration the continuing expansion of capabilities, as well as limitations of the current version.\n\nRead on to learn about the the philosophy of GET and how it works.\n\n## The design principals of GET\n\nOur team has past experience with provisioning and configuration tools, so we've learned what does and does not work, which is why we try to stick to the following goals:\n\n- GET is [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions): The word boring may look funny here but it's actually a [GitLab value](https://handbook.gitlab.com/handbook/values/). A boring solution essentially means to keep it simple. Provisioning and configuration solutions can get complicated **fast** with many common pitfalls, such as trying to support complex setups that come with a heavy maintenance cost. From the very beginning we've tried to avoid this, so GET essentially uses a standard Terraform and Ansible config that doesn't try to do anything fancy or complicated.\n- GET is *not* a replacement for [GitLab Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab) or the [Helm Charts](https://docs.gitlab.com/charts/): Truly some of the greatest \"magic\" in setting up GitLab is how much easier it's made Omnibus and the Helm Charts. Thanks to the incredible work by our Distribution teams, both of these install methods do a lot under the hood, and GET is not trying to replace these. 
In the same [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions) vein, GET's purpose is simply to set up GitLab environments at scale by installing Omnibus or Helm in the right places (along with any other needed infrastructure to support).\n- GET is one for all and designed to work for all our recommended [GitLab Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures/). Everything we do with GET has to be considered against this goal. It means we may not be able to support niche or overly complex set ups as this will lead to complex code and heavy maintenance costs. We do aim to support recommended customizations where appropriate.\n\nNext we look at how GET works at a high level, starting with provisioning with Terraform.\n\n## Provisioning the environment with Terraform\n\nThe first step to building an environment is to provision the machines and/or Kubernetes clusters that run GitLab. We undergo this process with the well-known provisioning tool, [Terraform](https://www.terraform.io/).\n\nNext, we've created multiple [Terraform modules](https://www.terraform.io/docs/language/modules/develop/index.html) in GET for each of the main big three cloud providers (GCP, AWS and Azure) that provision machines for you, according to the appropriate [reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/), along with the necessary supporting infrastructure, such as firewalls, load balancers, etc. We designed these modules to be as simple as possible and only require minimal configuration.\n\nFor more information on the entire Terraform configuration, [check out our docs](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_provision.md). An example of one of the main config files is `environment.tf`, which defines how each component's nodes should be setup. 
Below is an example of how it is configured with GCP for a [10k reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html) environment:\n\n```tf\nmodule \"gitlab_ref_arch_gcp\" {\n  source = \"../../modules/gitlab_ref_arch_gcp\"\n\n  prefix = var.prefix\n  project = var.project\n\n  object_storage_buckets = [\"artifacts\", \"backups\", \"dependency-proxy\", \"lfs\", \"mr-diffs\", \"packages\", \"terraform-state\", \"uploads\"]\n\n  # 10k\n  consul_node_count = 3\n  consul_machine_type = \"n1-highcpu-2\"\n\n  elastic_node_count = 3\n  elastic_machine_type = \"n1-highcpu-16\"\n\n  gitaly_node_count = 3\n  gitaly_machine_type = \"n1-standard-16\"\n\n  praefect_node_count = 3\n  praefect_machine_type = \"n1-highcpu-2\"\n\n  praefect_postgres_node_count = 1\n  praefect_postgres_machine_type = \"n1-highcpu-2\"\n\n  gitlab_nfs_node_count = 1\n  gitlab_nfs_machine_type = \"n1-highcpu-4\"\n\n  gitlab_rails_node_count = 3\n  gitlab_rails_machine_type = \"n1-highcpu-32\"\n\n  haproxy_external_node_count = 1\n  haproxy_external_machine_type = \"n1-highcpu-2\"\n  haproxy_external_external_ips = [var.external_ip]\n  haproxy_internal_node_count = 1\n  haproxy_internal_machine_type = \"n1-highcpu-2\"\n\n  monitor_node_count = 1\n  monitor_machine_type = \"n1-highcpu-4\"\n\n  pgbouncer_node_count = 3\n  pgbouncer_machine_type = \"n1-highcpu-2\"\n\n  postgres_node_count = 3\n  postgres_machine_type = \"n1-standard-4\"\n\n  redis_cache_node_count = 3\n  redis_cache_machine_type = \"n1-standard-4\"\n  redis_sentinel_cache_node_count = 3\n  redis_sentinel_cache_machine_type = \"n1-standard-1\"\n  redis_persistent_node_count = 3\n  redis_persistent_machine_type = \"n1-standard-4\"\n  redis_sentinel_persistent_node_count = 3\n  redis_sentinel_persistent_machine_type = \"n1-standard-1\"\n\n  sidekiq_node_count = 4\n  sidekiq_machine_type = \"n1-standard-4\"\n}\n\noutput \"gitlab_ref_arch_gcp\" {\n  value = 
module.gitlab_ref_arch_gcp\n}\n```\n\nWith this environment and [two other small config files in place](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_provision.md#2-setup-the-environments-config) Terraform can be run normally and work its magic. Below is a snippet of the output you'll see with GCP:\n\n```\n[...]\n\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[2]: Creating...\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[2]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[0]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.consul.google_compute_instance.gitlab[1]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[1]: Creating...\nmodule.gitlab_ref_arch_gcp.module.gitlab_nfs.google_compute_instance.gitlab[0]: Creation complete after 25s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[1]: Creating...\nmodule.gitlab_ref_arch_gcp.module.gitaly.google_compute_instance.gitlab[1]: Creation complete after 14s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[2]: Creating...\nmodule.gitlab_ref_arch_gcp.module.gitaly.google_compute_instance.gitlab[0]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[0]: Creating...\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[0]: Still creating... 
[10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[1]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[2]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[0]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[0]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.gitaly.google_compute_instance.gitlab[2]: Still creating... [20s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[2]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[1]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[1]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[2]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[0]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.gitaly.google_compute_instance.gitlab[2]: Creation complete after 25s\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[2]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[1]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[1]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[0]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[2]: Creation complete after 15s\nReleasing state lock. This may take a few moments...\n\nApply complete! 
Resources: 90 added, 0 changed, 0 destroyed.\n```\n\nOnce it's done, you should have a full set of machines for GitLab that can be configured with Ansible, which is what we'll look at next.\n\n## How to configure the environment with Ansible\n\nThe next step for setting up the environment is configuring [Ansible](https://www.ansible.com/). In a nutshell, this tool connects to each machine via SSH and runs tasks to configure GitLab.\n\nLike with Terraform, [we've created multiple roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html) and [Playbooks](https://docs.ansible.com/ansible/latest/user_guide/playbooks_intro.html) in GET that are designed to configure each component on the intended machine. Through Terraform, we apply labels to each machine that Ansible then tracks using its [dynamic inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_dynamic_inventory.html) to define the purpose of each machine.\n\nA detailed breakdown of the configuration process is available in the [GET for Ansible docs](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_provision.md). But, an example one of the main config files is `environment.tf`, which defines how the nodes of each component should be setup. Below is an example of how it looks with GCP for a [10k user reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html) environment:\n\nLike we did before with Terraform, we'll highlight one of the main config files, but you can see the full process in the [docs](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_configure.md). 
The file is `vars.yml`, an inventory variable file for your environment that contains various parts of the config Ansible needs to perform the setup, along with key GitLab config:\n\n```yml\nall:\n  vars:\n    # Ansible Settings\n    ansible_user: \"\u003Cssh_username>\"\n    ansible_ssh_private_key_file: \"\u003Cprivate_ssh_key_path>\"\n\n    # Cloud Settings\n    cloud_provider: \"gcp\"\n    gcp_project: \"\u003Cgcp_project_id>\"\n    gcp_service_account_host_file: \"\u003Cgcp_service_account_host_file_path>\"\n\n    # General Settings\n    prefix: \"\u003Cenvironment_prefix>\"\n    external_url: \"\u003Cexternal_url>\"\n    gitlab_license_file: \"\u003Cgitlab_license_file_path>\"\n\n    # Object Storage Settings\n    gitlab_object_storage_artifacts_bucket: \"{{ prefix }}-artifacts\"\n    gitlab_object_storage_backups_bucket: \"{{ prefix }}-backups\"\n    gitlab_object_storage_dependency_proxy_bucket: \"{{ prefix }}-dependency-proxy\"\n    gitlab_object_storage_external_diffs_bucket: \"{{ prefix }}-mr-diffs\"\n    gitlab_object_storage_lfs_bucket: \"{{ prefix }}-lfs\"\n    gitlab_object_storage_packages_bucket: \"{{ prefix }}-packages\"\n    gitlab_object_storage_terraform_state_bucket: \"{{ prefix }}-terraform-state\"\n    gitlab_object_storage_uploads_bucket: \"{{ prefix }}-uploads\"\n\n    # Passwords / Secrets - Can also be set as Environment Variables via ansible.builtin.env\n    gitlab_root_password: \"\u003Cgitlab_root_password>\"\n    grafana_password: \"\u003Cgrafana_password>\"\n    postgres_password: \"\u003Cpostgres_password>\"\n    consul_database_password: \"\u003Cconsul_database_password>\"\n    gitaly_token: \"\u003Cgitaly_token>\"\n    pgbouncer_password: \"\u003Cpgbouncer_password>\"\n    redis_password: \"\u003Credis_password>\"\n    praefect_external_token: \"\u003Cpraefect_external_token>\"\n    praefect_internal_token: \"\u003Cpraefect_internal_token>\"\n    praefect_postgres_password: \"\u003Cpraefect_postgres_password>\"\n```\n\nWith the 
variable file and the [environment inventory configured](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_configure.md#2-setup-the-environments-dynamic-inventory) Ansible can run normally. Here is a snippet of the output you'll see with GCP:\n\n```\n[...]\n\nTASK [gitlab-rails : Update Postgres primary IP and Port] **********************\nok: [gitlab-qa-10k-gitlab-rails-1]\nTASK [gitlab-rails : Setup GitLab deploy node config file with DB Migrations] ***\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nTASK [gitlab-rails : Reconfigure GitLab deploy node] ***************************\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nTASK [gitlab-rails : Setup all GitLab Rails config files] **********************\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nok: [gitlab-qa-10k-gitlab-rails-3]\nok: [gitlab-qa-10k-gitlab-rails-2]\nTASK [gitlab-rails : Reconfigure all GitLab Rails] *****************************\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nchanged: [gitlab-qa-10k-gitlab-rails-3]\nchanged: [gitlab-qa-10k-gitlab-rails-2]\nTASK [gitlab-rails : Restart GitLab] *******************************************\nchanged: [gitlab-qa-10k-gitlab-rails-3]\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nchanged: [gitlab-qa-10k-gitlab-rails-2]\n\n[...]\n\nPLAY RECAP *********************************************************************\ngitlab-qa-10k-consul-1     : ok=29   changed=17   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-consul-2     : ok=28   changed=16   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-consul-3     : ok=28   changed=16   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-elastic-1    : ok=41   changed=9    unreachable=0    failed=0    skipped=61   rescued=0    ignored=0\ngitlab-qa-10k-elastic-2    : ok=37   changed=7    unreachable=0    failed=0    skipped=62   rescued=0    ignored=0\ngitlab-qa-10k-elastic-3    : ok=37   changed=7    
unreachable=0    failed=0    skipped=62   rescued=0    ignored=0\ngitlab-qa-10k-gitaly-1     : ok=27   changed=15   unreachable=0    failed=0    skipped=30   rescued=0    ignored=0\ngitlab-qa-10k-gitaly-2     : ok=26   changed=14   unreachable=0    failed=0    skipped=30   rescued=0    ignored=0\ngitlab-qa-10k-gitaly-3     : ok=26   changed=14   unreachable=0    failed=0    skipped=30   rescued=0    ignored=0\ngitlab-qa-10k-gitlab-nfs-1 : ok=28   changed=7    unreachable=0    failed=0    skipped=55   rescued=0    ignored=0\ngitlab-qa-10k-gitlab-rails-1 : ok=41   changed=21   unreachable=0    failed=0    skipped=32   rescued=0    ignored=0\ngitlab-qa-10k-gitlab-rails-2 : ok=35   changed=16   unreachable=0    failed=0    skipped=33   rescued=0    ignored=0\ngitlab-qa-10k-gitlab-rails-3 : ok=35   changed=16   unreachable=0    failed=0    skipped=33   rescued=0    ignored=0\ngitlab-qa-10k-haproxy-external-1 : ok=40   changed=8    unreachable=0    failed=0    skipped=62   rescued=0    ignored=0\ngitlab-qa-10k-haproxy-internal-1 : ok=39   changed=8    unreachable=0    failed=0    skipped=60   rescued=0    ignored=0\ngitlab-qa-10k-monitor-1    : ok=43   changed=19   unreachable=0    failed=0    skipped=35   rescued=0    ignored=0\ngitlab-qa-10k-pgbouncer-1  : ok=30   changed=17   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-pgbouncer-2  : ok=29   changed=16   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-pgbouncer-3  : ok=29   changed=16   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-postgres-1   : ok=35   changed=16   unreachable=0    failed=0    skipped=36   rescued=0    ignored=0\ngitlab-qa-10k-postgres-2   : ok=34   changed=15   unreachable=0    failed=0    skipped=36   rescued=0    ignored=0\ngitlab-qa-10k-postgres-3   : ok=34   changed=15   unreachable=0    failed=0    skipped=36   rescued=0    ignored=0\ngitlab-qa-10k-praefect-1   : ok=29   changed=18   
unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-praefect-2   : ok=26   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-praefect-3   : ok=26   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-praefect-postgres-1 : ok=25   changed=14   unreachable=0    failed=0    skipped=29   rescued=0    ignored=0\ngitlab-qa-10k-redis-cache-1 : ok=26   changed=15   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-cache-2 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-cache-3 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-persistent-1 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-persistent-2 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-persistent-3 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-cache-1 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-cache-2 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-cache-3 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-persistent-1 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-persistent-2 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-persistent-3 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-sidekiq-1    : ok=28   changed=15   unreachable=0    failed=0    skipped=28  
 rescued=0    ignored=0\ngitlab-qa-10k-sidekiq-2    : ok=27   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-sidekiq-3    : ok=27   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-sidekiq-4    : ok=27   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\nlocalhost                  : ok=18   changed=3    unreachable=0    failed=0    skipped=38   rescued=0    ignored=0\n```\n\nOnce Ansible is done, you should have a fully running GitLab environment at scale!\n\n## What's next?\n\nWe've got a bunch of things planned for GET so it can support more features when setting up GitLab, such as SSL support, [cloud native hybrid architectures](/blog/cloud-native-architectures-made-easy/) on other cloud providers, object storage customization, and much more. We know deploying production-ready server applications is hard and has many potential requirements depending on the customer, and we hope to eventually support all recommended setups.\n\nCheck out the [GET development board](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/boards?group_by=epic) and our [issue list](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/issues) to see what is in progress. 
Share feedback and suggestions by adding to our issue lists, we're keen to hear what's important to customers.\n\n[Cover image](https://unsplash.com/photos/icdVDptHxpM) by [Jean Vella](https://unsplash.com/@jean_vella?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[9,1295],{"slug":6985,"featured":6,"template":680},"why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale","content:en-us:blog:why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale.yml","Why We Are Building The Gitlab Environment Toolkit To Help Deploy Gitlab At Scale","en-us/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale.yml","en-us/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale",{"_path":6991,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6992,"content":6997,"config":7002,"_id":7004,"_type":14,"title":7005,"_source":16,"_file":7006,"_stem":7007,"_extension":19},"/en-us/blog/why-we-chose-echarts",{"title":6993,"description":6994,"ogTitle":6993,"ogDescription":6994,"noIndex":6,"ogImage":5746,"ogUrl":6995,"ogSiteName":667,"ogType":668,"canonicalUrls":6995,"schema":6996},"Why we chose ECharts for data visualizations","Learn why GitLab switched from D3.js to ECharts as our library of choice for rendering data visualizations.","https://about.gitlab.com/blog/why-we-chose-echarts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we chose ECharts for data visualizations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Clement Ho\"}],\n        \"datePublished\": \"2019-09-30\",\n      }",{"title":6993,"description":6994,"authors":6998,"heroImage":5746,"date":6999,"body":7000,"category":743,"tags":7001},[4781],"2019-09-30","\nAs GitLab 
continues to grow in depth and breadth across the [DevOps lifecycle](/topics/devops/), the use of charts and data visualizations has increased in frequency and complexity. Throughout the life of GitLab as a project, we've used multiple libraries to render beautiful charts. As the number of different libraries increased along with our charting requirements, we decided it was time to start unifying our charting libraries to help us move quickly.\n\nAt first, we wanted to unify our charts using D3.js but this was difficult because D3.js isn't a charting library. In their own words: \"D3.js is a JavaScript library for manipulating documents based on data,\" meaning it is a low level visualization tool. D3.js is powerful but it has a big learning curve. Our team did not have the time to develop the expertise without impacting our product development velocity. We also knew we had an ambitious hiring plan, and we would be adding time to our onboarding process by using D3.js.\n\nThe frontend team set out to investigate different charting libraries that we could use to gain more velocity. The library didn't have to do everything we needed, but it had to get us most of the way there. We investigated many libraries including ECharts, Britecharts, and Plotly as potential options. In the end, ECharts was the clear winner for us. Here's why:\n\n## Echarts robust yet flexible chart types\nOn the monitor stage frontend team, we have the [ambitious goal of replacing well-known monitoring tools like DataDog and Grafana](/direction/monitor/). It was absolutely critical that our charting library had enough flexibility for us to create our own custom charts, but it was also important that the library had existing charts so that we didn’t have to create every chart from scratch for the sake of development velocity.\n\nECharts has an [incredible showcase](https://echarts.apache.org/examples/en/) of the adaptability of their charts. This was a great starting point for us. 
We tested out styling ECharts to match our design system to determine how adaptable it was and we were very satisfied with the results.\n\n![design](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/design.png)\n*Design spec for future GitLab charts.*\n\n![implementation](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/implementation.png)\n*Evaluation implementation using ECharts.*\n\n## Echarts performance\nWhen we were evaluating ECharts, we took one of our most complex user interactions for charts to benchmark the performance of the charting library. Although ECharts wasn’t perfect, it fared better than the alternatives. Below are some gifs recorded from changing the chart values in our [evaluation project](https://gitlab.com/adriel/echarts-proof-of-concept). As you can see, performance does decrease as the data points increase but it is still usable and it is unlikely we would have that many points in such a small chart.\n\n![10 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/10-points.gif)\n*Linked chart with 10 values.*\n\n![100 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/100-points.gif)\n*Linked chart with 100 values.*\n\n![1000 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/1000-points.gif)\n*Linked chart with 1000 values.*\n\n![4000 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/4000-points.gif)\n*Linked chart with 4000 values.*\n\n## Growing ecosystem\n\nECharts isn’t perfect but it has [improved over time](https://incubator.apache.org/projects/echarts.html). It started off as an [open source project from Baidu](https://whimsy.apache.org/board/minutes/ECharts.html) but is still going through the process of being incubated into the Apache Software Foundation. 
The [majority of ECharts users still seem to be based in China](https://echarts.apache.org/en/committers.html), meaning the developer community and corresponding documentation is written primarily in Chinese. Despite some language barriers, the ECharts community does seem to be growing more internationally. We’ve come across a variety of companies from the United States and Mexico who are either evaluating or using ECharts internally.\n\nThe Podling Project Management Committee (PPMC) of ECharts, which is their core team in GitLab terms, has also been very welcoming and energetic about growing the ecosystem. As we decided on ECharts and began developing new charts and replacing old charts, we’ve been able to build a partnership with the company. They have been very kind to meet with us online every month to help answer questions and to guide us in using their library effectively. This has been extremely helpful. For example during one of our meetings, Shuang Su gave us a brief walkthrough of the codebase and its architecture.\n\n## Where we are today with Echarts\n\nWe introduced [ECharts to the GitLab codebase in 11.6](https://gitlab.com/gitlab-org/gitlab-ce/issues/53147) and through ECharts have been rapidly building new chart types into our component library at a faster rate than ever before. We started with updating the charts in just our Monitor stage but have since introduced charts into the [Secure](https://gitlab.com/gitlab-org/gitlab-ee/issues/6954) and [Manage](https://gitlab.com/gitlab-org/gitlab-ee/issues/12079) stages.\n\nDepending on your use case, Apache ECharts could be a good fit for you too. 
For our team, ECharts has without a doubt increased our product development velocity over what it was with D3.js.\n\n| Old chart in D3.js | New chart in ECharts |\n|",[9,5429,3138],{"slug":7003,"featured":6,"template":680},"why-we-chose-echarts","content:en-us:blog:why-we-chose-echarts.yml","Why We Chose Echarts","en-us/blog/why-we-chose-echarts.yml","en-us/blog/why-we-chose-echarts",{"_path":7009,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7010,"content":7016,"config":7020,"_id":7022,"_type":14,"title":7023,"_source":16,"_file":7024,"_stem":7025,"_extension":19},"/en-us/blog/why-we-created-the-gitlab-memory-team",{"title":7011,"description":7012,"ogTitle":7011,"ogDescription":7012,"noIndex":6,"ogImage":7013,"ogUrl":7014,"ogSiteName":667,"ogType":668,"canonicalUrls":7014,"schema":7015},"Why we created a Memory team at GitLab","GitLab has a memory problem, so we created a specialized team to fix it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678549/Blog/Hero%20Images/memory_team_arie-wubben.jpg","https://about.gitlab.com/blog/why-we-created-the-gitlab-memory-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we created a Memory team at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-09-13\",\n      }",{"title":7011,"description":7012,"authors":7017,"heroImage":7013,"date":6072,"body":7018,"category":743,"tags":7019},[672],"\nGitLab is an [all-in-one DevOps solution](/topics/devops/) with a growing feature set. But as more features are added to the application, more memory is required. 
Some users have reportedly elected to migrate to other tools because the memory footprint required to run a minimum GitLab instance was exorbitant:\n\n> “GitLab is great and I have used it for years but I recently switched to Gogs for self-hosted repositories because it is much faster, easier to set up, and walk in a park to maintain. It doesn't have all the features (bloat) that GitLab has but it can probably satisfy >95% of Git users.” – [Jnr on HackerNews](https://news.ycombinator.com/item?id=19227935)\n\n> “If GitLab grows any more features I'll be moving away simply to ensure confidence that I understand my own infrastructure in the limited time I have to maintain it. It's the weirdest kind of success problem to have, but the truth is if it wasn't such a pain to make the move, I'd have transitioned away from GitLab six months ago.” – [Sir_Substance on HackerNews](https://news.ycombinator.com/item?id=19230557)\n\n## Step 1: Establish priorities to solve our memory problem\n\nWe created the [GitLab Memory team](/handbook/engineering/development/enablement/data_stores/application_performance/) to tackle this performance challenge. The aim of the Memory team is to [reduce the minimum instance for GitLab from 8GB](https://gitlab.com/gitlab-org/gitlab-ce/commit/0cd5d968038d6d64d95add0bbe3d63d8fcfdc23b) to 1GB of RAM. 
By reducing the memory required to run GitLab to 1GB, [our application can run anywhere](https://gitlab.com/groups/gitlab-org/-/epics/448), even on inexpensive commodity computers like an unaltered [Raspberry Pi 3 Model B+](https://www.raspberrypi.org/products/raspberry-pi-3-model-b-plus/).\n\nThere is no quick fix for reducing GitLab’s memory footprint, but the team has started by investigating memory and performance bottlenecks, gathering data, and prioritizing activities for the next three to four months based on these results.\n\n“We know we have memory issues to address, but we need more data to determine the source, the impact and how to best approach the problem,” says [Craig Gomes](/company/team/#craiggomes), memory engineering manager.\n\n[Kamil Trzciński](/company/team/#ayufanpl), distinguished engineer and memory specialist at GitLab, says the top three priorities for the Memory team fall into three distinct buckets:\n\n1. [Moving over to Puma](https://gitlab.com/groups/gitlab-org/-/epics/954)\n1. [Perform the low-level exercise by optimizing endpoints](https://gitlab.com/groups/gitlab-org/-/epics/448)\n1. [Improving our development practices](https://gitlab.com/groups/gitlab-org/-/epics/1415)\n\n### Migrating from Unicorn to Puma\n\nPreliminary research shows that the bulk of GitLab’s memory usage comes from running web application processes on Unicorn.\n\n“Each Web application process (Unicorn) can take 500 MB of RAM, and it can handle a single request at a time. The more users and traffic we need to support, the more processes and hence RAM we need,” says [Stan Hu](/company/team/#stanhu), engineering fellow at GitLab.\n\nOne of the first projects the Memory team is tackling is testing to see if migrating from Unicorn to Puma will reduce GitLab’s memory footprint. 
Both Unicorn and Puma are multi-threaded HTTP web servers that run on Rails, but unlike Unicorn, Puma is threaded and does not require as much memory.\n\nThe Memory team has successfully [configured Puma to work on dev.gitlab.com](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/82) to test its functionality and measure its memory reduction. The next big project in this domain is to [enable Puma on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/954).\n\n### Dig deeper into what's causing memory issues for GitLab.com\n\nBefore GitLab is able to run on less memory, the team needs to fix the memory problems we know about already on GitLab.com. One of these problems is the memory killer on open source background processor, Sidekiq.\n\n\"If a Sidekiq job runs, takes too much memory, and then gets killed, jobs in the queue will be retried indefinitely,\" says Stan. The team is working to fix this, along with other priority one problems with memory usage in [project import](https://gitlab.com/gitlab-org/gitlab-ce/issues/59754) and [exports](https://gitlab.com/gitlab-org/gitlab-ce/issues/35389) in the 12.3 release.\n\n### Improve development practices around memory usage\n\nThe Memory team created a massive epic that aims to capture related [development work focusing on making improvements to internal dev practices around code complexity and memory usage](https://gitlab.com/groups/gitlab-org/-/epics/1415).\n\n\"The reason behind that is to enable everyone during development to understand the impact of introducing new changes to the application,\" says Kamil in the [epic](https://gitlab.com/groups/gitlab-org/-/epics/1415). Some of the projects they are working on for the 12.3 release include [testing more endpoints using typical GitLab user scenarios (e.g. 
commenting on a MR)](https://gitlab.com/gitlab-org/quality/performance/issues/34) and set up a [performance monitoring solution across different environments](https://gitlab.com/gitlab-org/quality/performance/issues/37).\n\n## Step 2: Create a team to fix the memory problem\n\nWe need a specialized engineering team to assess the scope of the problem and identify solutions to reduce GitLab’s memory requirements.\n\n“Right now we have a very small team with two brand new team members,” says Craig. “The team is getting up to speed quickly and there is a lot of excitement about the potential of the team that more work keeps coming our way. It's a great challenge to have, and having more experienced engineers on the team will help us to achieve our goals.”\n\nThe current memory team is small but mighty. We have [Craig](/company/team/#craiggomes), the engineering manager, and three engineers on the permanent memory team: [Kamil](/company/team/#ayufanpl), [Qingyu Zhao](/company/team/#qzhaogitlab), and [Aleksei Lipniagov](/company/team/#alipniagov). The team works closely with senior product manager for distribution and memory, [Larissa Lane](/company/team/#ljlane). [We’re looking for more qualified people to join our team](https://handbook.gitlab.com/job-families/engineering/backend-engineer/#memory).\n\nThe Memory team is actively hiring engineers to help us enhance GitLab’s performance, but we have a high rejection rate because we require a specific, hard-to-find skill set. 
A [top priority for the Memory team is hiring at least one senior engineer in FY20-Q3](https://gitlab.com/gitlab-com/www-gitlab-com/issues/4885), which will allow us to take on a bigger workload as we move toward our goal of getting GitLab running on less than 1GB.\n\nFollow along with the Memory team by [subscribing to their channel on GitLab Unfiltered](https://www.youtube.com/playlist?list=PL05JrBw4t0Kq_5ZWIHYfbcAYjtXYcEZA3).\n\nCover photo by [Arie Wubben](https://unsplash.com/@condorito1953?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/airplane?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1295,2396],{"slug":7021,"featured":6,"template":680},"why-we-created-the-gitlab-memory-team","content:en-us:blog:why-we-created-the-gitlab-memory-team.yml","Why We Created The Gitlab Memory Team","en-us/blog/why-we-created-the-gitlab-memory-team.yml","en-us/blog/why-we-created-the-gitlab-memory-team",{"_path":7027,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7028,"content":7034,"config":7039,"_id":7041,"_type":14,"title":7042,"_source":16,"_file":7043,"_stem":7044,"_extension":19},"/en-us/blog/why-we-pay-local-rates",{"title":7029,"description":7030,"ogTitle":7029,"ogDescription":7030,"noIndex":6,"ogImage":7031,"ogUrl":7032,"ogSiteName":667,"ogType":668,"canonicalUrls":7032,"schema":7033},"Why GitLab pays local rates","Our compensation structure is known to spark controversy, so we want to give an update on our latest iteration on team member salaries.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680461/Blog/Hero%20Images/local-rates.jpg","https://about.gitlab.com/blog/why-we-pay-local-rates","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab pays local rates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        
"datePublished\": \"2019-02-28\"\n      }",{"title":7029,"description":7030,"authors":7035,"heroImage":7031,"date":7036,"body":7037,"category":808,"tags":7038},[2313],"2019-02-28","\n\nOur [compensation calculator](/handbook/total-rewards/compensation/compensation-calculator/) is a regular [hot topic on places like Hacker News](https://news.ycombinator.com/item?id=18441768#18443167) – pretty much any thread about GitLab has a comment about us paying local rates. As with everything GitLab does, we continue to [iterate](https://handbook.gitlab.com/handbook/values/#iteration) on our compensation model, and implemented a number of changes at the start of 2019. In addition to adjusting the salaries of backend developers, which were [raised considerably](https://gitlab.com/gitlab-com/www-gitlab-com/commit/9382348c3c81b92b598b0a6da0994d387bdfc404) so that we are [\"at or above market,\"](/handbook/total-rewards/compensation/#competitive-rate) according to GitLab CEO [Sid Sijbrandij](/company/team/#sytses), the location factor was also revised to better reflect the respective areas covered.\n\nBut first, let's take a step back to see how we got to here.\n\n### Why did GitLab start paying team members according to location in the first place?\n\n\"It’s something that kind of happened organically,\" Sid says. \"Every time we hired someone, we’d discuss what a reasonable compensation would be. And many times, it came back to what they were making beforehand, and that really depended a lot on where they were. So we kind of started out having local market salaries as we grew. At a certain point, we said, 'Okay, this is apparently the standard. We’re basing it not just on your function and the seniority you have in the function, but also where you live.'\"\n\nGitLab no longer uses salary history as a factor in compensation offers and does not ask candidates about their previous pay. 
Instead, we ask all candidates, regardless of location, for their salary expectations.\n\n### Understanding the rent index\n\nThe compensation calculator's rent index came from a noted correlation between the aforementioned local market rate salaries and rent prices in the area. Using limited data sets with more than 100 locations across the globe, an analysis was run to determine the best gauge for local rates. The rent as listed on Numbeo was found to have the highest correlation.\n\n\"When you think about it, the correlation we found made sense,\" Sid explains. \"If there’s a place where people pay high wages, it tends to attract people. And then the rents, almost by force of nature, start rising. It’s not that we want to pay you based on your rent or compensate your cost of living. We want to make sure that we pay at or above market. We found that the rent was a great way to calculate that, and it’s why there’s a rent index as part of our global compensation formula.\"\n\n### New improvements on local market calculations\n\nGitLab compensation is calculated by delving into [local market data, when possible](/handbook/total-rewards/compensation/compensation-calculator/#location-factor), to ensure that [salaries are being tabulated](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/17460) fairly.\n\n\"Instead of using [just the rate index], what we do now is look at a number of different sources, usually four or five, to get market data for a city,\" says GitLab's outgoing Chief Culture Officer [Barbie Brewer](/company/team/#BarbieJBrewer). \"Then we find the median of that, and use it as our benchmark. That being said, you can't do this in all cities. We have a lot of employees in jobs that aren't typically available where they are located. In those instances, we fall back on the other equation. Generally speaking, it's pretty close. When we've had to go back and check those benchmarks, we found that it required very few adjustments. 
We were getting it right 95 percent of the time, so doing that check was good. It helped us understand that we were not that far off.\"\n\nBarbie also noted that some employees in low-income communities could fare better than expected because people in towns within 90 minutes of a large city will have their salaries calculated according to the higher metropolitan factor.\n\nNow that we know how GitLab got started with local rates, here's a look at why we have continued down this path.\n\n### Standard pay eats away at production and personnel\n\nIf everyone is paid the same role-based salary, the company would not be able to hire as many team members, and those that are brought on would not be as widely distributed, according to Sid. Ultimately, this approach would cut away at GitLab's ability to produce as well as be geographically diverse, he argues.\n\n\"If we pay everyone the San Francisco wage for their respective roles, our compensation costs would increase greatly, and we would be forced to hire a lot fewer people. Then we wouldn’t be able to produce as much as we would like,\" Sid explains. \"And if we started paying everyone the lowest rate possible, we would not be able to retain the people we want to keep.\n\n\"So you end up in a place where the compensation is somewhere in between. And that would cause us to have a concentration of team members in low-wage regions because it’s a better deal for them. They’re getting more than the market rate, so they’re more likely to apply and accept an offer. And they’re more likely to stay regardless of how happy they are, which is not healthy for them or the company.\"\n\n### Standard pay for all roles may not be as fair as it seems\n\nAnother problem with paying everyone the same salary, Sid says, comes down to how far a dollar goes in one place compared to another. 
If everyone is paid a standard salary, those who live in high-income areas would have less discretionary income when compared to their counterparts in lower-income communities. \n\nRemote companies using a standard pay structure are reportedly running into problems with their compensation plans. \n\n\"The most recent company I talked to has everybody getting paid the same, no matter where they're located. It's very, very different from GitLab, and it is causing problems for them,\" says Barbie. \"We have very strong communication with that company. They're hoping that we can help influence them to move away from paying everyone the same no matter their location. They're finding that it's extremely inequitable.\"\n\n### Closing the gap on local rates for distributed workers\n\nAs remote, or distributed, workplaces continue to take hold and grow across all industries, Sid hopes the location-based compensation gaps will narrow.\n\n\"I think the core difference is there’s people saying, 'Same work, same pay.' And there’s people like us saying we should be at market,\" Sid says. \"I hope the distance between those stances becomes smaller as more companies offer remote work opportunities. I think that’s the way to fix it – just make sure the market rates become higher and consistent.\n\n\"And that’s why we will be promoting remote work a lot. We have a great [page in our handbook about running an all-remote company](/company/culture/all-remote/). Hopefully, that is the way we will contribute to having people across the world get paid the same wages. We will track with that trend; but we won't be ahead of it or behind it. If you see what remote work is doing in a country like the Ukraine, it’s a great source of income for the people there. And I want to contribute to that.\"\n\nStill have questions or thoughts on GitLab's compensation structure? 
Sound off in the comments below (or on HN, inevitably 😁) or ping us on Twitter [@gitlab](https://twitter.com/gitlab).\n\n[Cover image](https://unsplash.com/photos/uCMKx2H1Y38) by [AbsolutVision](https://unsplash.com/@freegraphictoday) on Unsplash\n{: .note}\n",[810,9,832,873],{"slug":7040,"featured":6,"template":680},"why-we-pay-local-rates","content:en-us:blog:why-we-pay-local-rates.yml","Why We Pay Local Rates","en-us/blog/why-we-pay-local-rates.yml","en-us/blog/why-we-pay-local-rates",{"_path":7046,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7047,"content":7053,"config":7058,"_id":7060,"_type":14,"title":7061,"_source":16,"_file":7062,"_stem":7063,"_extension":19},"/en-us/blog/why-we-use-rails-to-build-gitlab",{"title":7048,"description":7049,"ogTitle":7048,"ogDescription":7049,"noIndex":6,"ogImage":7050,"ogUrl":7051,"ogSiteName":667,"ogType":668,"canonicalUrls":7051,"schema":7052},"Why we use Ruby on Rails to build GitLab","Here's our CEO on GitLab’s inception using Rails, and how challenges are being handled along the way.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668296/Blog/Hero%20Images/gitlab-ruby.jpg","https://about.gitlab.com/blog/why-we-use-rails-to-build-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we use Ruby on Rails to build GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-10-29\",\n      }",{"title":7048,"description":7049,"authors":7054,"heroImage":7050,"date":7055,"body":7056,"category":299,"tags":7057},[2313],"2018-10-29","\nWhen our Co-founder and Engineering Fellow [Dmitriy Zaporozhets](/company/team/#dzaporozhets) decided to build GitLab, he chose to do it with Ruby on Rails, despite working primarily in PHP at the time. 
GitHub, a source of inspiration for GitLab, was also based on Rails, making it a logical pick considering his interest in the framework. GitLab CEO [Sid Sijbrandij](/company/team/#sytses) thinks his co-founder made a good choice:\n\n\"It's worked out really well because the Ruby on Rails ecosystem allows you to shape a lot of functionality at a high quality,\" he explained. \"If you look at GitLab, it has an enormous amount of functionality. Software development is very complex and to help with that, we need a lot of functionality and Ruby on Rails is a way to do it. Because there's all these best practices that are on your happy path, it’s also a way to keep the code consistent when you ship something like GitLab. You're kind of guided into doing the right thing.\"\n\n### Depending on useful gems\n\nRuby gems play an integral role in the building of GitLab, with it loading more than a thousand non-unique gems, according to Sid. Calling the Ruby on Rails framework \"very opinionated,\" he thinks it's a strong environment in which to build a complex app like GitLab.\n\n\"There's a great ecosystem around it with gems that can make assumptions about how you're doing things and in that regard, I think the Ruby on Rails ecosystem is still without par,\" he says. \"If you look at our Gemfile, it gives you an indication of how big the tower is of dependencies that we can build on. Ruby on Rails has amazing shoulders to stand on and it would have been much slower to develop GitLab in any other framework.\"\n\n### Overcoming challenges\n\nAll of this is not to say there haven’t been challenges in building GitLab with Ruby on Rails. Performance has been an issue that our developers have made strides to improve in a number of ways, including rewriting code in Go and [using the Vue framework](/blog/why-we-chose-vue/). 
The latter is being used to rewrite frequently accessed pages, like issues and merge requests, so they load faster, improving user experience.\n\nGo is being used to address other issues affecting load times and reduce memory usage.\n\n\"Ruby was optimized for the developer, not for running it in production,\" says Sid. \"For the things that get hit a lot and have to be very performant or that, for example, have to wait very long on a system IO, we rewrite those in Go … We are still trying to make GitLab use less memory. So, we'll need to enable multithreading. When we developed GitLab that was not common in the Ruby on Rails ecosystem. Now it's more common, but because we now have so much code and so many dependencies, it's going to be a longer path for us to get there. That should help; it won't make it blazingly fast, but at least it will use less memory.\"\n\nAdding Go to GitLab’s toolbox led to the creation of a separate service called [Gitaly](/blog/the-road-to-gitaly-1-0/), which handles all Git requests.\n\n### Building on GitLab’s mission\n\nThe organized, structured style of Ruby on Rails’ framework falls in line with our core mission. Because Rails is streamlined, anyone can jump into GitLab and participate, which made it especially attractive to Sid from the start.\n\n\"[Our mission is that everyone can contribute](/company/mission/#mission),\" he explains. \"Because Ruby on Rails is really opinionated about which pieces go where, it's much easier for new developers to get into the codebase, because you know where people have put stuff. For example, in every kitchen you enter, you never know where the knives and plates are located. But with Ruby on Rails, you enter the kitchen and it's always in the same place, and we want to stick to that.\n\n>In every kitchen you enter, you never know where the knives and plates are located. 
But with Ruby on Rails, you enter the kitchen and it's always in the same place, and we want to stick to that.\n\n\"I was really encouraged when I opened the project and saw it for the first time a year after Dmitriy started it. I opened it up and it's idiomatic Rails. He followed all the principles. He didn't try to experiment with some kind of fad that he was interested in. He made it into a production application. Dmitriy carefully vetted all the contributions to make sure they stick to those conventions, and that's still the case. I think we have a very nice codebase that allows other people to build on top of it. One of our sub-values is [boring solutions](https://handbook.gitlab.com/handbook/values/#efficiency): don't do anything fancy. This is so that others can build on top of it. I think we've done that really well … and we're really thankful that Ruby has been such a stable ecosystem for us to build on.\"\n\n[Cover image](https://unsplash.com/photos/0y6Y56Pw6DA) by [Elvir K](https://unsplash.com/@elvir) on Unsplash\n{: .note}\n",[811,267,3138,9,1295,2396,723],{"slug":7059,"featured":6,"template":680},"why-we-use-rails-to-build-gitlab","content:en-us:blog:why-we-use-rails-to-build-gitlab.yml","Why We Use Rails To Build Gitlab","en-us/blog/why-we-use-rails-to-build-gitlab.yml","en-us/blog/why-we-use-rails-to-build-gitlab",{"_path":7065,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7066,"content":7072,"config":7078,"_id":7080,"_type":14,"title":7081,"_source":16,"_file":7082,"_stem":7083,"_extension":19},"/en-us/blog/without-a-shadow-of-a-doubt",{"title":7067,"description":7068,"ogTitle":7067,"ogDescription":7068,"noIndex":6,"ogImage":7069,"ogUrl":7070,"ogSiteName":667,"ogType":668,"canonicalUrls":7070,"schema":7071},"Without a shadow of a doubt: Inside GitLab's CEO shadow program","Technical marketing manager Tye Davis did everything from joining investor meetings to battling with the flight simulator at GitLab Mission 
Control.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680653/Blog/Hero%20Images/sfbaybridge.jpg","https://about.gitlab.com/blog/without-a-shadow-of-a-doubt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Without a shadow of a doubt: Inside GitLab's CEO shadow program\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2019-07-11\",\n      }",{"title":7067,"description":7068,"authors":7073,"heroImage":7069,"date":7075,"body":7076,"category":299,"tags":7077},[7074],"Tye Davis","2019-07-11","\n\nWalking up to the iconic Millennium tower on Monday, [I](/company/team/#TyeD19) was a bit nervous for my first day of the [GitLab CEO shadow program](/handbook/ceo/shadow/). Sometimes, our impression of the CEO is someone who is intimidating and strictly business; they only care about things work related. That persona often results from not having access to the CEO, and the fear that one mistake in their presence may cost your job. The GitLab CEO shadow program proved to be a pleasant departure from this mindset.\n\nEntering GitLab “Mission Control,” I was met with a large apartment turned into a hybrid boardroom with a touch of living space. This is a unique working environment because GitLab is an [all-remote](/company/culture/all-remote/) company that allows GitLab team members to work from their choice of location (home, coffee shop, [van](/blog/how-remote-work-at-gitlab-enables-location-independence/), shared workspaces, surfboard, etc.). So, although you are physically at \"Mission Control,\" most of the CEO shadow program is done via video conferencing. There is no need to go from physical meeting room to meeting room, you simply go from conference call to conference call (woo efficiency!). 
Six monitors add to the office-like feel of the living room, displaying (amazing) sales data, locations of team members in over 50+ countries, and the DevOps toolchain landscape that GitLab replaces. The boardroom also offers access to gaming systems, a [flight simulator](https://en.wikipedia.org/wiki/X-Plane_(simulator)) and readily available drinks and snacks. I was able to calm my excitement and I settle into the room with the fellow CEO shadow, [Mayank](/company/team/#mayanktahil).\n\n![Mission Control center](https://about.gitlab.com/images/blogimages/ceoshadow_graphs.jpg){: .shadow.medium.center}\nInside GitLab's \"Mission Control.\"\n{: .note.text-center}\n\n### Hitting the ground running\n\nMy first face-to-face meeting with CEO [Sid Sijbrandij](/company/team/#sytses) was on our first CEO-specific call of the day, a public live stream on \"Sid's three biggest remote work challenges\" with [Leo Widrich](https://twitter.com/leowid), co-founder of Buffer. This was the first ice breaker into the CEO shadow program and helped me understand just how inclusive the shadow program is. Sid really made us feel like we belonged on the call by incorporating us into the discussion. His inclusivity lowered my stress a few notches, and I began to understand what was to come in the next few weeks: [transparency](https://handbook.gitlab.com/handbook/values/#transparency).\n\nThe second meeting took the inclusivity of the program a step further, as we joined a group call with the executive team from across the GitLab organization (aka the [E-group](/company/team/structure/#e-group)). You might expect some hesitation in allowing someone who is not an executive to join a meeting where top-level matters are discussed, but the CEO shadow program was made exactly for these types of experiences. The program gives participants full visibility into every working part of building an enterprise company. 
There was no resistance from the E-group team and upon joining the meeting, I was met with an overwhelming ‘welcome’ to our working session. This alleviated most of my nervousness and truly showcased GitLab’s [collaboration value](https://handbook.gitlab.com/handbook/values/#collaboration) by displaying ‘no ego’ and ‘kindness’ from the executive team.\n\n### The feeling of welcomeness was constant\n\n There were very few circumstances where Mayank and I were not included in meetings due to the sensitivity of the subject. The most eye-opening experience for me was meeting with potential investors in GitLab that represent some of the largest and best-known investment firms in the world. These organizations discussed topics around GitLab’s vision and technology and the firms said they see the incredible upside of GitLab. If I was only able to attend one meeting during the whole program, I would choose this one. My confidence in the direction this company is taking has increased after seeing firsthand how much interest there is from investors in GitLab’s growth. Observing the amount of planning leading up to these meetings between Sid and [Paul, our CFO](/company/team/#pmachle) was a great learning experience. Investors are excited about the future of GitLab as a result of all of the hard work of every GitLab team member. My only disappointment is that the program is only two weeks long and that I won’t get to continue to be part of these developing relationships.\n\n![Shadowing the CEO](https://about.gitlab.com/images/blogimages/tyeshadowingceo.jpg){: .shadow.small.center}\nDoing my best impression of shadowing the CEO's activity in his everyday engagements\n{: .note.text-center}\n\n### Takeaways\n\nThe shadow program was an incredibly enlightening experience. Joining this program gave me an accurate and deeply intuitive understanding of the life of a CEO. 
Sid has mastered the high velocity of responsibility and full situational awareness that is needed to effectively lead our company as CEO. He also acknowledges that he always has room for improvement – so much so that he has a section of flaws that are listed on the GitLab [CEO handbook page](/handbook/ceo/#flaws). One big takeaway from the shadow program is listed on the CEO page. Something I believe is the biggest factor in collaborating effectively is what Sid notes about his approach, “Not a flaw but something to know about me, I have [strong opinions weakly held](https://blog.codinghorror.com/strong-opinions-weakly-held/). Or, as someone said, I come in hot but am open to new evidence.” This is applicable across the company (and personally) as we all [iteratively](https://handbook.gitlab.com/handbook/values/#iteration) build a tool that best fits our customer needs, and we must be receptive to adjusting accordingly if new evidence corrects our product vision.\n\nBusiness aside, Sid has some pretty funny GitLab stories. If you ever get the chance to ask him about Burning Man, I promise it’ll be a good laugh. My time in the CEO shadow program was unique, educational, and inspirational. I am thankful for this opportunity and hope that one day I’ll reciprocate in a future exec role. Extra shout out to [Cheri](/company/team/#cheriholmes) who coordinates diligently so that all of us CEO shadows are set up for success. 
Looking back, the most stressful part of the CEO shadow program was the anxiety the X-Plane flight simulator brought when trying to land an airplane (the landing didn't go well).\n\nPhoto by [Landry Gapangwa](https://unsplash.com/@gapangwa91?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/@gapangwa91?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,873,832],{"slug":7079,"featured":6,"template":680},"without-a-shadow-of-a-doubt","content:en-us:blog:without-a-shadow-of-a-doubt.yml","Without A Shadow Of A Doubt","en-us/blog/without-a-shadow-of-a-doubt.yml","en-us/blog/without-a-shadow-of-a-doubt",{"_path":7085,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7086,"content":7092,"config":7097,"_id":7099,"_type":14,"title":7100,"_source":16,"_file":7101,"_stem":7102,"_extension":19},"/en-us/blog/working-at-gitlab-affects-my-life",{"title":7087,"description":7088,"ogTitle":7087,"ogDescription":7088,"noIndex":6,"ogImage":7089,"ogUrl":7090,"ogSiteName":667,"ogType":668,"canonicalUrls":7090,"schema":7091},"How working at GitLab has changed my view on work and life","A glimpse of the things I've learned at GitLab since I joined.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678678/Blog/Hero%20Images/gitlab-effects.jpg","https://about.gitlab.com/blog/working-at-gitlab-affects-my-life","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How working at GitLab has changed my view on work and life\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hazel Yang\"}],\n        \"datePublished\": \"2018-03-15\",\n      }",{"title":7087,"description":7088,"authors":7093,"heroImage":7089,"date":7094,"body":7095,"category":808,"tags":7096},[4179],"2018-03-15","\nI will have been at GitLab for two years in June of this year. Working at GitLab is a fresh experience for me. 
Joining a company outside of Asia and working 100 percent remotely was not something that I had previously done. It not only affects my work but my entire life. I am so grateful to have the opportunity to work with talented and friendly people around the world. I think it would be good to share my reflections about what I’ve learned during this 19-month journey.\n\n\u003C!-- more -->\n\nWe have an open source [handbook](/handbook/) that everyone can access, and it includes our six [values](https://handbook.gitlab.com/handbook/values/), (CREDIT) which support our everyday work. Keeping these values in mind benefits me a lot both in my work and in my life, and I would love to share them with you here:\n\n### Expressing oneself completely, clearly and without reservation\n\nCollaboration is essential in our everyday work. At GitLab, we prefer asynchronous communication instead of synchronous communication since we are spread around the world, from America, Europe, Africa, to Asia. We rely on text-based communication heavily. However, words are cold without the body language support, and they could easily lead to misunderstanding and conflict. So how we express our thoughts clearly and kindly in text becomes crucial.\n\nAfter joining GitLab, I always think twice before sending out messages or comments, even in my personal life. I started to choose my words more carefully both in English and Chinese. I also have tried to explain as much as possible. I found that if I did these two things, I can avoid the misunderstanding and increase the efficiency of communication. The most important thing is that people feel comfortable while discussing with you in the text. 
So don't be afraid to completely express your thoughts, in a careful and sensitive manner.\n\n### Don't be shy to show your gratitude\n\nWe have [\"Say thanks\"](/handbook/communication/#say-thanks) in our [values](https://handbook.gitlab.com/handbook/values/), and we often say \"Thank you\" to each other, especially in our \"Thanks\" channel on Slack.\n\n{: .text-center}\n![graphic-gratitude](https://about.gitlab.com/images/blogimages/working-at-gitlab/gratitude.png){:height=\"480px\" width=\"680px\"}\n\nDue to my personality and culture, at first I was shy to express my appreciation to my friends, family, and colleagues. At GitLab, we have a unique culture that encourages people to say “thanks,” so I try not to be too shy to show my gratitude. As I practiced this more and more, it became a habit and a natural thing to me. Now I say “thanks” very often, even for little things, and it feels positive and makes me happy every day.\n\nExpressing gratitude not only makes me feel satisfied, it also makes the person that I expressed my appreciation for have a beautiful mood.\n\n### Learning from failure\n\n\"Iteration\" is critical to our product improvement and development. We see what each of us produce initially as a draft. This helps us reduce the cycle time and have a prototyping mindset towards the features we are working on. We are not afraid of failure since we are always flexible in adjusting our products based on the feedback from both our external and internal communities.\n\n{: .text-center}\n![graphic-iterations](https://about.gitlab.com/images/blogimages/working-at-gitlab/iterations.png){:height=\"480px\" width=\"680px\"}\n\nI have applied this mindset to my personal life as well. In my culture, we value the smart person who never makes mistakes. So we try as hard as possible to avoid errors and losing face. 
However, the prototyping mindset changed my thoughts and reactions towards the things that previously may have made me feel embarrassed or uncomfortable. I became more open-minded in accepting positive and negative feedback from others. I no longer get upset or offended if someone corrects something that I did. I realized that my life is also a kind of product and it will be better and better in every iteration.\n\n### Trust your team and grow with them\n\nWhen you trust your team members, you will be brave enough to leave your comfort zone because you believe they will give you the support whenever you need it.\n\nA good example of trust concerns my English. English is my second language and therefore it is a weakness of mine. When you lack confidence in something, you often refuse to do the things outside of your comfort zone as you fear it would make you look stupid. This was exactly my situation when I joined GitLab. However, when I realized that the people around me weren’t as concerned about my shortcomings in English as much as they valued me for my contributions to the company. It gives me the courage to face my linguistic challenges.\n\nI am still not 100 percent as confident in English as I am in Mandarin, yet my confidence has increased from 30 percent to almost 70 percent if one puts a number to it. As you can see, I am writing this blog post in English to share my experience at GitLab now. This is only my second blog post.\n\nGitLab provides a very positive environment where I can improve and grow professionally as well as personally. I appreciate that my colleagues are always supportive and patient. I feel safe and comfortable while doing challenging things, not just concerning my English but in all of the tasks that I face at GitLab.\n\n### Befriend your manager and colleagues\n\nI felt that it was harder to befriend managers and colleagues at a company in Asia. 
I am not sure what the reason is, but I think perhaps it is because of Confucianism which impacts our culture a lot.\n\nAt GitLab, I speak freely about numerous things to my manager, [Sarrah Vesselov](/company/team/#SVesselov), since I know she cares about our team and wants our team to grow. I also feel that GitLab is like a big family even though we are a large and distributed team. We try as hard as we can to get people together in both virtual and practical ways.\n\n{: .text-center}\n![image-summit](https://about.gitlab.com/images/blogimages/working-at-gitlab/summit.png){:height=\"480px\" width=\"680px\"}\n\nFor example, we have the [team call](/handbook/communication/#team-call), and people can share a bit about their lives. We also encourage our team members to join the [\"virtual coffee breaks\"](https://work.qz.com/1147877/remote-work-why-we-put-virtual-coffee-breaks-in-our-company-handbook/) to get to know each other. Moreover, we host a [summit](/events/gitlab-contribute/) to get together in person every nine months. This year we will meet in [Cape Town, South Africa](https://gitlab.com/summits/2018-Summit).\n\n### Embrace diversity\n\nGitLab promotes [diversity](/company/culture/inclusion/) and hires globally. We believe \"Culture add\" much more than \"Culture fit.\" We include different race, color, religion, gender, national origin, age, disability, or genetics. We also support inclusive benefits, for instance, [Transgender Medical Services](/handbook/total-rewards/benefits/general-and-entity-benefits/inc-benefits-us/) and [Pregnancy and Maternity Care](/handbook/total-rewards/benefits/general-and-entity-benefits/#parental-leave). We have an LGBTQ+ channel on Slack as well. Embracing differences powers our creativity.\n\n{: .text-center}\n![graphic-diversity](https://about.gitlab.com/images/blogimages/working-at-gitlab/diversity.png){:height=\"480px\" width=\"680px\"}\n\nWorking with people from diverse backgrounds is fantastic. 
I have learned from others’ communicative styles and different ways of thinking. I have broadened my views and now see the world from different perspectives. I am much more open-minded. The most important thing is that I completely understand that we are equal regardless of who we are.\n\n## Conclusion\n\nWorking at GitLab is a unique experience for me. I feel excited to start my work every day and enjoy the job I am doing.\n\nFor those that may be interested in working at Gitlab, we are currently hiring people from everywhere. If you want to join the journey, you can check out our [jobs](/jobs/) page and feel free to apply for the position if you feel that you are qualified. We are looking forward to hearing from you!\n",[9,811,810],{"slug":7098,"featured":6,"template":680},"working-at-gitlab-affects-my-life","content:en-us:blog:working-at-gitlab-affects-my-life.yml","Working At Gitlab Affects My Life","en-us/blog/working-at-gitlab-affects-my-life.yml","en-us/blog/working-at-gitlab-affects-my-life",{"_path":7104,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7105,"content":7111,"config":7116,"_id":7118,"_type":14,"title":7119,"_source":16,"_file":7120,"_stem":7121,"_extension":19},"/en-us/blog/working-on-two-git-branches-at-the-same-time",{"title":7106,"description":7107,"ogTitle":7106,"ogDescription":7107,"noIndex":6,"ogImage":7108,"ogUrl":7109,"ogSiteName":667,"ogType":668,"canonicalUrls":7109,"schema":7110},"How to work on two Git branches at the same time","Watch the demo on how using the GitLab Web IDE and your local dev environment to work on two branches at once can help save time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678782/Blog/Hero%20Images/working-on-two-git-branches-at-the-same-time.jpg","https://about.gitlab.com/blog/working-on-two-git-branches-at-the-same-time","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to work on two Git 
branches at the same time\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-10-03\",\n      }",{"title":7106,"description":7107,"authors":7112,"heroImage":7108,"date":7113,"body":7114,"category":743,"tags":7115},[3074],"2018-10-03","\nI was recently using both my local development environment and the GitLab [Web IDE](/blog/introducing-gitlab-s-integrated-development-environment/), and found a really nice workflow for working with two Git branches simultaneously.\n\n### The problem\n\nIn this scenario, you’re doing development work on one branch, in one part of your codebase, and then likely documenting your process in another place. I really don’t want all of this in one merge request, because I don’t want to delay shipping the development work if [the docs](https://docs.gitlab.com) aren’t done. I want to be able to get it live so that others can see it, give feedback on each individual component, and iterate on it. At the same time, I don’t want to delay too long on documenting the process, because I want the docs to be as accurate and reproducible as possible.\n\n### The fix\n\nWhile doing my development work in my local development environment, I created another merge request for the documentation using the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/), essentially working on two different Git branches at the same time, using two different editors.\n\nIn my quick example below, you can see a merge request to add Jenkins content to our [DevOps tools](/competition/) page. I’ve checked out this branch locally, and I have it open in my Atom editor. I’ve been doing some work by updating `features.yml`, as well as a Markdown file and a Haml file. All of these changes are related to one merge request. 
While I’m committing changes locally to the comparison page, I’m documenting each step in my Web IDE in a separate tab, to make sure my instructions are precise, helpful, and completed in real time.\n\n### Watch the demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uV3ycYnwhBc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can see what we've got planned for the Web IDE in 2019 in our post about [our product vision for DevOps Create](/blog/create-vision/).\n\nWhat are other ways the Web IDE has come in handy for you? Let us know by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\nCover [photo](https://unsplash.com/photos/3y1zF4hIPCg) by [Hans-Peter Gauster](https://unsplash.com/photos/3y1zF4hIPCg) on Unsplash\n{: .note}\n",[993,677,1297,9,723],{"slug":7117,"featured":6,"template":680},"working-on-two-git-branches-at-the-same-time","content:en-us:blog:working-on-two-git-branches-at-the-same-time.yml","Working On Two Git Branches At The Same Time","en-us/blog/working-on-two-git-branches-at-the-same-time.yml","en-us/blog/working-on-two-git-branches-at-the-same-time",{"_path":7123,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7124,"content":7129,"config":7134,"_id":7136,"_type":14,"title":7137,"_source":16,"_file":7138,"_stem":7139,"_extension":19},"/en-us/blog/working-remotely-with-children-at-home",{"title":7125,"description":7126,"ogTitle":7125,"ogDescription":7126,"noIndex":6,"ogImage":5668,"ogUrl":7127,"ogSiteName":667,"ogType":668,"canonicalUrls":7127,"schema":7128},"How to make your home a space that works with kids","Here's our best advice on making your home/work space work for you and your kids.","https://about.gitlab.com/blog/working-remotely-with-children-at-home","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make 
your home a space that works with kids\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2019-08-01\",\n      }",{"title":7125,"description":7126,"authors":7130,"heroImage":5668,"date":7131,"body":7132,"category":808,"tags":7133},[4083],"2019-08-01","\n\n_In part three of our series on working remotely with children we look at how GitLab\nteam members literally make their homes work for them while children are around.\nIn part one of our series we examined [maternity/paternity leave polices around\nthe world](/blog/how-is-it-being-a-new-mom-working-for-gitlab/) and in part two Jarka Košanová shared her experiences while\n[working as a new mother](/blog/balancing-career-and-baby/)._\n\nAt [GitLab Contribute 2019](/blog/contribute-wrap-up/) in New Orleans,\nwe had an unconference\nsession about working remotely with children at home. The\nfacilitators were [Lyle Kozloff][lyle] and myself, [Sean\nMcGivern][smcgivern]. Not surprisingly, the four sessions generated a lot of good ideas.\nThe participants had all ages of children from\n'not yet, but thinking about it' to older teenagers. They also worked in\ndifferent functions at GitLab and had different tenures – some people\nhad been at GitLab for years while others had just joined the week of\nContribute. And others were community contributors or partners of GitLab team members.\n\nNo conversation about working at home with kids can fail to include ideas about how\nto structure the space. To make it all work, it's important to be creative.\n\n## Make use of what's available\n\n> I'd never had a remote job before and I didn't realize just how loud my daughter was.\nI got a noise-cancelling microphone because my daughter is in the next room to me. – [_Désirée Chevalier, test automation engineer_][dchevalier2]\n\n> I have an open-plan kitchen/dining/living room, which looks nice, but with my kids around\nit's pretty much impossible to work from any of these areas. 
I'm planning to try making the\nloft \"the office,\" but I haven't done it yet. – [_Marcel Amirault, technical writer_][Ravlen]\n\nIf you don't have a large house or apartment, you might need to think outside\nthe box when it comes to managing your space. And things can change again as your\nchildren age or if you have more children. Even having a room solely\nfor work might come with some additional challenges!\n\n## Designate spaces clearly\n\n> We have a one-bedroom apartment and I mostly work in the living room. When I take\ncalls I go into the bedroom. We involved the kids in the planning about communication.\nThe bedroom door has a sign with an X or an O on it. If there's an O they can come in, grab\nsomething, and close the door behind them. If there's an X they can't come in for any reason.\nWhen we moved in my son was still three, and it worked for the later stages of three –\nespecially because he was involved. – [_Lyle Kozloff, support engineering manager_][lyle]\n\n![Minimum Viable Product for indicating space usage](https://about.gitlab.com/images/blogimages/mvp-presence-signs.jpg){: .shadow.medium.center}\nHow one team member communicates whether or not he can be interrupted.\n{: .note.text-center}\n\nIf you need to be uninterrupted, it's important that that is very clear\nto everyone else – especially the children. Having a dedicated space is\ngreat, but even a shared space can be turned into a dedicated space for\nsome of the time before becoming a shared space again later.\n\n## Get out of the house if you need to\n\n> I find it better to set boundaries ahead of time instead of reacting to things that are happening.\nFour or five times a month I will work from a coffee shop to help enforce that too. – [_Mike Greiling,\nsenior frontend engineer_][mikegreiling]\n\n> I used to have a dedicated room then it became my son's room. Then I moved to the\nentrance hallway, because it's big and there was room for a desk. 
I tried it for one year, but\nmy wife and child were always coming past. I've started going to a coworking space. It feels\nlike a failure because I don't stay home, but it works best for us. – [_Alessio Caiazza, senior backend engineer_][nolith]\n\nThis is not a failure at all! Everyone has to do what they need to do for their\nown circumstances. [Working remotely doesn't necessarily mean working from\nhome](/company/culture/all-remote/#what-all-remote-does-not-mean), and stressed\nparents are not going to be able to be at their best.\n\n_In part four of our series we have advice on everything from time management to relationships._\n\n[dchevalier2]: /company/team/#dchevalier2\n[lyle]: /company/team/#lkozloff\n[mikegreiling]: /company/team/#mikegreiling\n[nolith]: /company/team/#nolith\n[Ravlen]: /company/team/#ravlen1\n[smcgivern]: /company/team/#mcgivernsa\n\nPhoto by [Baby Natur](https://unsplash.com/@babynatur?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/kids-toys?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[832,810,9],{"slug":7135,"featured":6,"template":680},"working-remotely-with-children-at-home","content:en-us:blog:working-remotely-with-children-at-home.yml","Working Remotely With Children At Home","en-us/blog/working-remotely-with-children-at-home.yml","en-us/blog/working-remotely-with-children-at-home",{"_path":7141,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7142,"content":7148,"config":7153,"_id":7155,"_type":14,"title":7156,"_source":16,"_file":7157,"_stem":7158,"_extension":19},"/en-us/blog/year-of-kubernetes",{"title":7143,"description":7144,"ogTitle":7143,"ogDescription":7144,"noIndex":6,"ogImage":7145,"ogUrl":7146,"ogSiteName":667,"ogType":668,"canonicalUrls":7146,"schema":7147},"What we learned after a year of GitLab.com on Kubernetes","It's been one year since we moved GitLab.com to Kubernetes. 
We unpack the challenges and learnings from this major migration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681569/Blog/Hero%20Images/nico-e-AAbjUJsgjvE-unsplash.jpg","https://about.gitlab.com/blog/year-of-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What we learned after a year of GitLab.com on Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jarvis\"}],\n        \"datePublished\": \"2020-09-16\",\n      }",{"title":7143,"description":7144,"authors":7149,"heroImage":7145,"date":7150,"body":7151,"category":743,"tags":7152},[3325],"2020-09-16","\n\nFor about a year now, the infrastructure department has been working on migrating all services that run on GitLab.com to Kubernetes. The effort has not been without challenges, not only with moving services to Kubernetes but also managing a hybrid deployment during the transition. We have learned a number of lessons along the way that we will explore in this post.\n\nSince the very beginning of GitLab.com, servers for the website have run in the cloud on virtual machines. 
These VMs are managed by Chef and installed using our [official Linux package](/install/#ubuntu).\nWhen an application update is required, [our deployment strategy](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/deploy/gitlab-com-deployer.md) is to simply upgrade fleets of servers in a coordinated rolling fashion using a CI pipeline.\nThis method, while slow and a bit [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions), ensures that GitLab.com is using the same installation methods and configuration as our self-managed customers who use Linux packages.\nWe use this method because it is especially important that any pain or joy felt by the community when installing or configuring self-managed GitLab is also felt by GitLab.com.\nThis approach worked well for us for a time but as GitLab.com has grown to hosting over 10 million projects we realized it would no longer serve our needs for scaling and deployments.\n\n## Enter Kubernetes and cloud native GitLab\n\nWe created the [GitLab Charts](https://gitlab.com/gitlab-org/charts) project in 2017 to prepare GitLab for deployments in the cloud and enable self-managed users to install GitLab into a Kubernetes cluster. We knew then that running GitLab.com on Kubernetes would benefit the SaaS platform for scaling, deployments, and efficient use of compute resources. At the time though there were still many application features that depended on NFS mounts that delayed our migration off of VMs.\n\nThe push for cloud native and Kubernetes gave engineering an opportunity to plan a gradual transition that removes some of the network storage dependencies on the application while continuing to develop new features. 
Since we started planning the migration in the summer of 2019, most of these limitations have been resolved and the journey to running all of GitLab.com on Kubernetes is now well underway!\n\n## Running GitLab.com on Kubernetes\n\nFor GitLab.com we use a single regional GKE cluster that services all application traffic. To minimize the complexity of the (already complex) migration we focus on services that don't depend on local storage or NFS. While GitLab.com is running from mostly monolithic Rails codebase, we route traffic depending on workload characteristics to different endpoints which are isolated into their own node pools.\n\nOn the frontend these types are divided into web, API, git SSH/HTTPs requests, and Registry.\nOn the backend we divide our queued jobs into different characteristics depending on [predefined resource boundaries](/blog/scaling-our-use-of-sidekiq/) that allow us to set Service-level Objective (SLO) targets for a range of different workloads.\n\nAll of these GitLab.com services are configured with the unmodified GitLab Helm chart, which configures them in sub-charts that can be selectively enabled as we gradually migrate services to the cluster.\nWhile we opted to not include some of our stateful services such as Redis, Postgres, GitLab Pages, and Gitaly, when the migration to Kubernetes is finished it will drastically reduce the number of VMs that we currently manage with Chef.\n\n## Transparency and managing the Kubernetes configuration\n\nAll configuration is managed in GitLab itself in three configuration projects using Terraform and Helm.\nWhile we use GitLab to run GitLab wherever possible, we maintain a separate GitLab installation for operations.\nThis is done to ensure we do not depend on the availability of GitLab.com for deployments and upgrades of GitLab.com.\n\nEven though our pipelines that execute against the Kubernetes cluster run on this separate GitLab deployment, the code repositories are mirrored and publicly viewable 
at the following locations:\n\n* [k8s-workloads/gitlab-com](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com): GitLab.com configuration wrapper for the GitLab Helm chart.\n* [k8s-workloads/gitlab-helmfiles](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-helmfiles/): Contains the configuration for services that are not directly related to the GitLab application. This includes configurations for cluster logging and monitoring and integrations like PlantUML.\n* [gitlab-com-infrastructure](https://gitlab.com/gitlab-com/gitlab-com-infrastructure): Terraform configuration for the Kubernetes and legacy VM infrastructure. All the resources necessary to run the cluster are configured here, including the cluster, node pools, service accounts, and IP address reservations.\n\n[![hpa](https://about.gitlab.com/images/blogimages/a_year_of_k8s/hpa.png)](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/315#note_390180361)\nWhenever a change is proposed, a public [short summary](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/315#note_390180361) is displayed, with a link to detailed diff that an SRE reviews before applying changes to the cluster.\n{: .note.text-center}\n\nFor SREs, we link to a detailed diff on our operations GitLab instance that has limited access.\nThis allows employees and the community, who do not have access to the operational project which is limited to SREs, to have visibility into proposed config changes.\nBy having a public GitLab instance for code, and a private instance for [CI pipelines](/features/continuous-integration/), we are able to keep a single workflow while at the same time ensuring we don't have a dependency on GitLab.com for configuration updates.\n\n## The lessons we learned along the way\n\nWe have learned a few things along the way, lessons that we are applying to future migrations and new deployments into Kubernetes.\n\n### Increased billing from 
cross-AZ traffic\n\n![git egress](https://about.gitlab.com/images/blogimages/a_year_of_k8s/git_egress.png)\nDaily egress bytes/day from the Git storage fleet on GitLab.com.\n{: .note.text-center}\n\nGoogle divides its network into regions and regions are divided into availability zones (AZs).\nBecause of the large amount of bandwidth required for Git hosting, it is important we are cognizant of network egress. For internal network traffic, egress is only free-of-charge if it remains in a single AZ.\nAt the time of writing this blog post, we deliver approximately 100TB on a typical work day for just Git repositories.\nOn legacy VM topology, services that were previously colocated on the same VMs are now running in Kubernetes pods.\nThis mean some network traffic that was previously local to a VM can now potentially traverse availability zones.\n\nRegional GKE clusters provide the convenience of spanning multiple availability zones for redundancy.\nWe are considering [splitting the regional GKE cluster into single zonal clusters](https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/1175) for services that use a lot of bandwidth to avoid network egress charges while maintaining redundancy at the cluster level.\n\n### Resource limits, requests, and scaling\n\n![replicas](https://about.gitlab.com/images/blogimages/a_year_of_k8s/replicas.png)\nNumber of replicas servicing production traffic on registry.gitlab.com, Registry traffic reaches it peak at ~15:00UTC.\n{: .note.text-center}\n\nOur migration story began in August 2019 when we migrated the GitLab Container Registry to Kubernetes, the first service to move.\nThough this was a critical and high traffic service, it was a good choice for the first migration because it is a stateless application with only a few external dependencies.\nThe first challenge we experienced was the large number of evicted pods, due to memory constraints on our nodes.\nThis required multiple changes to requests and limits. 
We found that with an application that increases its memory utilization over time, low requests (which reserves memory for each pod) and a generous hard limit on utilization was a recipe for node saturation and a high rate of evictions.\nTo adjust for this [we eventually decided to use higher requests and lower limit](https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/998#note_388983696) which took pressure off of the nodes and allowed pods to be recycled without putting too much pressure on the node.\nAfter experiencing this once, we start our migrations with generous requests and limits that are close to the same value, and adjust down as needed.\n\n### Metrics and logging\n\n![registry-general](https://about.gitlab.com/images/blogimages/a_year_of_k8s/registry-general.png)\nThe Infrastructure department focuses on latency, error rates and saturation that have [Service-level objectives (SLOs)](https://en.wikipedia.org/wiki/Service-level_objective) that tie into our [overall system availability](https://dashboards.gitlab.net/d/general-slas/general-slas?orgId=1).\n{: .note.text-center}\n\nOver the past year, one of the major changes in the infrastructure department was improvements to how we monitor and manage SLOs.\nSLOs allowed us to set targets on individual services which were monitored closely during the migration.\nYet even with this improved observability, we can't always see problems right away with our metric reporting and alerting.\nFor example, focusing on latency and error rates may not adequately cover all uses of the service that is being migrated.\nWe discovered this problem very early with some of the workloads that were moved into the cluster. 
This challenge was particularly acute when we had to validate features that do not receive many requests but have very specific configuration dependencies.\nOne of the key migration lessons was to also evaluate more than just monitoring metrics, but also logs, and the long-tail of errors in our monitoring.\nNow for every migration we include a detailed list of log queries and plan a clear rollback procedures that can be handed off from one shift to the next in case of issues.\n\nServing the same requests on legacy VM infrastructure and Kubernetes simultaneously presented a unique challenge.\nUnlike a lift-and-shift migration, running on legacy VMs and Kubernetes at the same time requires that our observability is compatible with both and combines metrics into one view.\nMost importantly, we are using the same dashboards and log queries to ensure the observability is consistent during the transition period.\n\n### Shifting traffic to the new cluster\n\nFor GitLab.com we maintain a segmentation of our fleet named the [canary stage](/handbook/engineering/#canary-testing).\nThis canary fleet services our internal projects, [or can be enabled by users](https://next.gitlab.com), and is deployed to first for infrastructure and application changes.\nThe first service we migrated started with taking limited traffic internally and we are continuing to use this method to ensure we are meeting our SLOs before committing all traffic to the cluster.\nWhat this means for the migration is requests to internal projects are first routed to Kubernetes and then we slowly move other traffic to the cluster using HAProxy backend weighting.\nWe learned in the process of moving from VMs to Kubernetes that it was extremely beneficial for us to have an easy way to move traffic between the old and new infrastructure, and to keep legacy infrastructure available for rollback in the first few days after the migration.\n\n### Reserved pod capacity and utilization\n\nOne problem we identified 
early was, while our pod start times for the Registry service were very short, our start times for Sidekiq took as long as [two minutes](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1775).\nThe long Sidekiq start times posed a challenge when we started moving workloads to Kubernetes for workers that need to process jobs quickly and scale fast.\nThe lesson here was while the Horizontal Pod Autoscaler (HPA) works well in Kubernetes for adapting to increased traffic, it is also important to evaluate workload characteristics and set reserved pod capacity, especially for uneven demand.\nIn our case, we saw a sudden spike in jobs which caused a large scaling event which saturated CPU before we could scale the node pool.\nWhile it is tempting to squeeze as much as possible out of the cluster, after experiencing some initial performance problems we now start with a generous pod budget and scale down later, while keeping a close eye on SLOs.\nThe pod start times for Sidekiq service have improved significantly and now average about 40 seconds. [Improving the pod start times](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1775) benefited GitLab.com as well as all the self-managed customers using the official GitLab Helm chart.\n\nAfter transitioning each service, we enjoyed many benefits of using Kubernetes in production, including much faster and safer deploys of the application, scaling, and more efficient resource allocation.\nThe migration benefits extend beyond GitLab.com. With each improvement of the official Helm chart, we provide additional benefits to our self-managed customers.\n\nWe hope you enjoyed reading about our Kubernetes migration journey. 
As we continue to migrate more services to the cluster you can read more at following links:\n\n* [Why are we migrating to Kubernetes?](/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/)\n* [GitLab.com on Kubernetes](/handbook/engineering/infrastructure/production/architecture/#gitlab-com-on-kubernetes)\n* [Tracking epic for the GitLab.com Kubernetes Migration](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/112)\n\nCover image by [Nico E.](https://unsplash.com/@xnico) on [Unsplash](https://www.unsplash.com/)\n{: .note}\n\n## Read more on Kubernetes:\n\n- [How to install and use the GitLab Kubernetes Operator](/blog/gko-on-ocp/)\n\n- [Threat modeling the Kubernetes Agent: from MVC to continuous improvement](/blog/threat-modeling-kubernetes-agent/)\n\n- [How to deploy the GitLab Agent for Kubernetes with limited permissions](/blog/setting-up-the-k-agent/)\n\n- [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n\n- [Understand Kubernetes terminology from namespaces to pods](/blog/kubernetes-terminology/)\n",[9,2396],{"slug":7154,"featured":6,"template":680},"year-of-kubernetes","content:en-us:blog:year-of-kubernetes.yml","Year Of Kubernetes","en-us/blog/year-of-kubernetes.yml","en-us/blog/year-of-kubernetes",{"_path":7160,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7161,"content":7166,"config":7171,"_id":7173,"_type":14,"title":7174,"_source":16,"_file":7175,"_stem":7176,"_extension":19},"/en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles",{"title":7162,"description":7163,"ogTitle":7162,"ogDescription":7163,"noIndex":6,"ogImage":5062,"ogUrl":7164,"ogSiteName":667,"ogType":668,"canonicalUrls":7164,"schema":7165},"You’re hired! 
Two GitLab contributors turn their success into full-time engineering roles","As we continue to celebrate the 10th anniversary of the first commit to GitLab, here’s a look at how two highly active community members became enthusiastic team members.","https://about.gitlab.com/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"You’re hired! Two GitLab contributors turn their success into full-time engineering roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-11-12\",\n      }",{"title":7162,"description":7163,"authors":7167,"heroImage":5062,"date":7168,"body":7169,"category":1340,"tags":7170},[950],"2021-11-12","[Greg Myers](https://gitlab.com/greg) and [Rajendra Kadam](https://gitlab.com/rkadam3) have something beyond their engineering roles at GitLab in common – both started out as GitLab contributors. We wanted to share their stories as part of our celebration around the 10th anniversary of the first commit to GitLab.\n\nMyers, a GitLab Senior Support Engineer, says his contributions started in 2018, when he first found his passion for helping other community forum members. \n\n“Most of my early contributions involved helping people set up, configure, and troubleshoot self-hosted GitLab installations,” Myers says.\n\nHe enjoyed this helper role so much he applied for an engineering position, but failed the technical interview and didn’t receive an offer. “I kept contributing to GitLab and helping others in the forum while I leveled up in my weak areas,” he says.\n\nKadam, a GitLab Back-end Engineer and [GitLab hero](/community/heroes/members/), started contributing to GitLab in Jan 2020 to learn more about Ruby on Rails and apply it to his then-workplace. \n\n“I did not stop after that since it is more than the code. 
I loved working with people at GitLab and the culture, even though I was not a full-time team member,” Kadam says.\n\nLike Kadam, Myers enjoyed being a part of the GitLab community. “The majority of my ‘code’ contributions back then were quite simple – fixing typos and markdown formatting issues in documentation,” he says. “I'd never contributed to an open source project of this size and caliber, and I was impressed by how easy and smooth it was to get involved and contribute.”\n\nHe remembers feeling “star-struck” when GitLab co-founder Dmitriy Zaporozhets personally responded in the comments to one of his first MRs.\n\nUsing what he learned as a contributor, Kadam earned a promotion from his employer. He went on to participate in [GitLab hackathons](/community/hackathon/), winning three in a series. His prominence in the GitLab community led him to be offered and to accept an internal engineering role in February 2021. Kadam blogged about the journey from being a contributor to a team member [on Medium](https://rajendraak.medium.com/how-i-got-a-job-at-gitlab-a3515214b74b).\n\nMyers, meanwhile, feeling more confident about his skills, took another shot at a team member role. “After four months, I reapplied for the support engineer position, and this time I got the job. Now it is my job to help others with GitLab and contribute to GitLab, and I love what I do,” Myers says.\n\nAs a Developer Relations Support counterpart, he helps others in the GitLab community forum and advocates for the GitLab wider community. And, as a GitLab Open Source Support Liaison, “I give back to open source communities I know and love,” he says.\n\nHe encourages others to not only contribute to the GitLab community but to help other forum members as he did. After all, you never know where those contributions can lead. 
“Being a GitLab community member and contributor led me to my dream job,” he says.",[1440,767,9],{"slug":7172,"featured":6,"template":680},"you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles","content:en-us:blog:you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles.yml","You Are Hired Two Gitlab Contributors Turn Their Success Into Full Time Engineering Roles","en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles.yml","en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles",{"_path":7178,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7179,"content":7185,"config":7190,"_id":7192,"_type":14,"title":7193,"_source":16,"_file":7194,"_stem":7195,"_extension":19},"/en-us/blog/you-asked-and-our-red-team-answered",{"title":7180,"description":7181,"ogTitle":7180,"ogDescription":7181,"noIndex":6,"ogImage":7182,"ogUrl":7183,"ogSiteName":667,"ogType":668,"canonicalUrls":7183,"schema":7184},"You asked, and our Red Team answered","We held a public, ask me anything with our Red Team. 
Here’s what people asked.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670889/Blog/Hero%20Images/security-ama-blog-header.png","https://about.gitlab.com/blog/you-asked-and-our-red-team-answered","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"You asked, and our Red Team answered\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2021-01-29\",\n      }",{"title":7180,"description":7181,"authors":7186,"heroImage":7182,"date":7187,"body":7188,"category":698,"tags":7189},[1010],"2021-01-29","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n> [\"Transparency is only a value if you do it when it is hard\"](https://handbook.gitlab.com/handbook/values/#transparency-is-only-a-value-if-you-do-it-when-it-is-hard) 👁\n\nThat's one of the lines that has stuck with me from my GitLab Inc. onboarding nearly 2 years ago. You know where practicing transparency is typically \"hard\"?\n    \n**Security.**\n    \nThankfully, I can honestly say that I work on a Security team that not only pushes the transparency boundaries in the industry, but also within GitLab itself. Take our [RedTeam](/handbook/security/threat-management/red-team/),  they’ve put out a whole public project called [Tech Notes](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/red-team-tech-notes) which contains deep dives on some of the challenges and vulnerabilities they’ve encountered in their work.  They also just held their first-ever, live and public [AMA/Ask Me Anything](/handbook/communication/ask-me-anything/#purpose) on January 26, 2021 and responded to over a dozen questions about the work that they do and how they go about doing it here at GitLab.  If you joined us, thank you!  If you missed it, check out the replay below.  
We’d love to hear from you on whether you’d like to see an event like this in the future with our Red Team (or [another group within Security](/handbook/security/#security-department)) -- just drop a comment below, tweet/DM one of us on twitter or message [GitLab Red Team email](mailto:redteam@gitlab.com). \n    \n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/FCu7MiRX5Lw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n    \n## Who’s on the team\n    \n![GitLab Red Team](https://about.gitlab.com/images/blogimages/gl-red-team.png){: .shadow.large.center}\n     \n## Here’s what you asked\n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> Considering you're a full remote company, persistence on endpoints is still relevant in your activity or hunting tokens or credentials make more sense? Some Cloud services do not require you to reach them with VPN, so SSO tokens or credentials can be enough in some cases to reach sensitive information.\n{: #question}\n   \n**Note:** Added for clarity: “endpoint” refers to laptops and mobile devices.\n{: .note}\n\n**Steve Manzuik**: I think the security of our endpoints is still very important but you are right about SSO tokens / auth cookies being a bit higher priority for us. This is why Greg spent some time creating tooling, [gitrob](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gitrob) and [token hunter](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/token-hunter), around finding secrets that get accidentally leaked in code. In addition, many of the other scenarios we have tested have been focused on obtaining auth tokens or credentials. \n    \n**Greg Johnson**:  You’re definitely making a good point about initial access here. 
Early on, there weren’t very many options for tooling in terms of hunting for the types of tokens you mentioned.  We’ve put a lot of time and iterations into improving our ability to find sensitive leaks quickly.  The tools that Steve mentioned are constantly being honed, changed, and reimagined completely to improve our techniques and the accuracy of the tools.\n    \n**Chris Moberly**: I have a bit of a non-technical, non-operation take on this as well. We’re an internal Red Team, meaning that our “targets” are often our colleagues and friends. These are people that we work with every day. Just in terms of efficiency, it is important to gain and maintain trust with them. For example, if we have a question about how a tricky bit of code works, we can just pop into an internal development Slack room and ask. We do this all the time, and our colleagues have been amazing at trusting our positive intentions and helping us out. But, even beyond efficiency, it simply would make for an unpleasant work environment if our colleagues were constantly worried about us trying to exploit their laptops. This is especially true in an all-remote company where those laptops are inside their homes and often double up as personal machines. Because of this, I really prefer emulating endpoint exploitation and persistence; either with a dummy device or a willing target who is 100% aware of what is going on. This is where the concept of an “assumed breach” can also come into play. We need to understand the threat model for an endpoint compromise, demonstrate the extraction of credentials, cookies, etc that would exist there, and then move on to attacking the cloud services as others have mentioned above. I think a bit of persistence emulation would be good for testing the efficacy of endpoint management tools: like, can we keep an implant running on a standard endpoint build for the duration of an operation without triggering alerts? 
If so, what can be fixed to get those alerts happening sooner?\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> To evaluate an insider threat, do you consider to run exercises from authorized users? I mean, run an exercise to simulate a legit change in your system but with some malicious effects? For eg. spin-up a new web service or whatever with some backdoors in order to be able to keep access?\n{: #question}\n    \n**Steve Manzuik**: Yes, we also run exercises that we call “assumed compromise scenarios” which fall in line with this exact question. The high-level premise is focused on what happens once an attacker gains access: legitimate or otherwise. Then we look at what that attacker may do, where they may pivot, and what actions we can detect and alert on.\n    \n**Frederic Loudet**: As an example, we will start an operation from a shell inside our infrastructure (on a VM or a container), assuming a rogue internal user is starting from there or someone managed to compromise some of our defenses and get this shell access.\n    \n**Greg Johnson**: We also model many of the ways an attacker may try to achieve persistence with these operations.\n\n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> When conducting adversarial simulation and/or exploratory penetration testing operations, what systems / platforms do you use to store, collaborate on, and manage testing related intelligence (execution times, commands, findings, etc.)?\n{: #question}\n    \n**Steve Manzuik**: We leverage our own product, GitLab, as well as a product known as [Vectr](https://vectr.io/) that helps us map our attacks and related detection/response.\n    \n**Chris Moberly**: We also leverage our own self-managed GitLab instance to make TTPs (Tactics, Techniques, and Procedures) automated and repeatable. 
This is done by hosting our custom attack tooling in projects and writing CI jobs that run them on demand and/or at scheduled intervals. We have one tool that builds and executes in CI and outputs the results onto a [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) site that requires multi-factor authorization to access; which is a pretty cool usage of our available tools. Just to echo Steve’s mention of Vectr - that tool is awesome, I highly recommend checking it out. And if you want to brainstorm creative ways to use GitLab for tracking the operational bits, you can type “GitLab for project management” into your favorite search engine to find some cool blogs and videos on the topic.\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> How do you promote collaborations between your team and other security / application groups within your organization? What sort of collaborative operations does your team work on?\n{: #question}\n    \n**Steve Manzuik**: This is an area where our Red Team is a bit different than a traditional one. We try to be as transparent and open about our operations as possible. There are of course always going to be cases where we need to be stealthy and share less but we attempt to limit those as much as possible. Typically, when we are performing an operation we will pull in a resource from impacted teams to at least be aware of what we are doing. So for example, we recently worked on an operation focusing on our development processes and had resources from our [AppSec team](/handbook/security/security-engineering/application-security/) working directly with us and helping us with ideas and knowledge. Same goes when we are touching infrastructure things -- we will involve someone from the infrastructure team. 
\n    \n**Fred Loudet**: As another collaboration example, on some operations, we will create a dedicated chat channel and invite team members (infrastructure or others depending on the operation) so they can follow the operation “live” as we try to comment on what we do/what we find. It works really well, we even get ideas from those other members. They see we are not hiding anything from them and not doing it to make them look bad (ok, we may refrain from saying “yoohoo” if we manage to gain something good!).\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> How do you break the stigma of ‘red teamers are here to attack us’ within your organization? How do you promote an environment of trust when certain teams may go into collaborations/operations with the mindset of ‘these people are going to tell me my baby is ugly’?\n{: #question}\n    \n**Steve Manzuik**: This is why we try to be as transparent as possible when we are planning our operations. Before we even start work, we document the general test plan and goals and then typically meet with stakeholders to ensure that they are on the same page. It also helps that our Red Team is experienced enough to be able to deliver bad news without attaching ego or judgement to it as well. We make sure that everyone knows that we are here to help vs. just here to judge their technical work. \n    \n**Fred Loudet**: As Steve says, we are lucky Gitlab is pushing “transparency”, so it makes everyone more open to reviews and remarks from various teams. As mentioned in question 4, when it makes sense, we really try to involve the “targeted” teams fully into the operation, including if possible within the execution phase. And so far it works well, everyone sees what could be seen as “bad news” as “opportunities to improve” (It also helps Gitlab promote the “right to make mistakes and learn from them”). 
\n    \n**Greg Johnson**: There is a very human aspect to red teaming you can’t ignore.  Building trust with people is in essence a very simple formula.  We try to make sure that the people we interact with expect a positive experience through the planning and preparation steps that Steve and Fred mentioned, first and foremost.  We also do our best to make sure that this expectation of a positive experience is met in the end through all phases of the operation including remediation so there are as few gaps as possible between the positive experience people expect and what they actually get.\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> When planning an adversarial simulation operation, do you try to mimic the TTP usage patterns of known actors or do you tailor TTP usage to your organization?\n{: #question}\n    \n**Steve Manzuik**: Both. We leverage MITRE’s ATT&CK framework where we can, but have also had to adjust to some more cloud specific TTPs that are not well documented in ATT&CK. From our perspective, both leveraging the known TTPs as well as being crafty and coming up with our own are both very important to help raise the security bar. \n    \n**Greg Johnson**: In the end, we don’t limit our creativity, but we do make an effort to try to mimic attacks that leverage known vectors as often as we can.  We draw from a lot of different sources to inspire our operations as legitimate attackers will do the same.\n    \n**Chris Moberly**: To add to Steve’s point, ATT&CK is organized by Tactics, which are high level things like “Initial Access” or “Persistence” and then Techniques, which are very specific things like “create a systemd service” or “abuse set-uid binary”. The Tactics are a really solid foundation for pretty much everything we do, and we try to use those wherever we can. 
For the Techniques, though, MITRE prefers to include only items that have been discovered in the wild and have some level of attribution. That makes sense for the framework, but at GitLab we’re working with an environment that is quite modern (no physical networks, no Active Directory, etc). We need to be a bit ahead of the curve in terms of developing our own techniques: because we know they will work, and we want to be able to detect and respond to them now. So, we put in some serious time researching possible post-exploitation techniques for the various environments we use. We try to write about those things publicly in our [Tech Notes](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/red-team-tech-notes), as well, so that others can use them. Personally, I find this one of the more “fun” parts of the job.\n    \nI think we’ll probably also take a look at replaying known-attacks that hit major news headlines. One of the primary goals of security is to basically stay out of the news, so we can look at things like the recent drama with SolarWinds and say “how did it happen to them, could it happen to us, and what would happen if it did?”. That type of operation would look much more closely emulating the known tactics of known actors.\n    \n    \n_**Follow up question**: Are any of those cloud TTPs that aren't tracked in MITRE ATT&CK published outside of vectr or where the public can access them?_\n\n**Steve Manzuik**: This is something that we need to take a look at and if/when we do, we’d be publishing them in our [Tech Notes](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/red-team-tech-notes).\n     \n**Chris Moberly**: Some of these are already published there, in a blog-like format, but we could certainly produce more ATT&CK-like formatting if there is an appetite for it. If so, let us know! 
[mailto:](mailto:redteam@gitlab.com)\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> What exceptional/unusual skills do you have in your Red Team and how diverse is the skillset across the team?\n{: #question}\n    \n**Steve Manzuik**: I don’t know if we have any “unusual skillsets” that relate directly to our work. But our team has a variety of experiences and skills across all the security domains. Something that I know I look for when we are bringing in new team members is the ability to learn quickly. The fun but also hard part of our job is that things are always changing and there is always something new for us to quickly learn. \n    \n**Greg Johnson**: I will say that our skill sets seem to compliment each other very well.  We each have areas of strengths and weaknesses.  Usually if I have a knowledge gap I can fill it on the immediate team I work with.\n    \n**Fred Loudet**: There are however some “traditional” skillsets that are not useful at all here 😄! Anything Active Directory/Microsoft related is “useless”, same for “physical office” related skills like wireless or breaking into buildings. Our core skills basically revolve around coding/web/cloud computing.\n    \n**Chris Moberly**: I would say I’m probably the best on the team at writing low-quality code lacking in any tests. :) \n**Fred Loudet**: I am pretty certain I write crappier code!  \n**Greg Johnson**: We’ll see about that!\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://giphy.com/embed/ule4vhcY1xEKQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n    \n_**Note:** in our Jan 26 live AMA we ran out of time before being able to answer all the great questions we received.  
We’ll answer them below!_\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> Does any of your testing focus on product security? (e.g. Testing if using GitLab would make a good c2 channel)?\n{: #question}\n    \n**Steve Manzuik**: Yes, in a lot of cases our exercises will either use functionality of our product or will be directly against the product. That said, we do stay away from doing appsec type testing which would overlap with what both our [Bug Bounty](https://hackerone.com/gitlab) and AppSec team focus on. \n    \n**Chris Moberly**: Ha! I love this question as it starts out pretty basic and then drops a really interesting bombshell at the end there. To start with the basic part, of course leveraging new or known bugs in a core product is always useful for a Red Team, so we definitely do that. But, personally, I find that the way a product is customized tends to be what introduces the most risk. So we look at the various dials people can turn, and how that could potentially provide an entry point into a system. Mark Loveless wrote a great blog recently about [making sure your self-managed GitLab instance is secure](/blog/gitlab-instance-security-best-practices/), that one is worth a read \n_Note from Mark: also see [this project](https://gitlab.com/gitlab-com/gl-security/security-research/gitlab-standalone-instance)_.\n    \n**Chris Moberly**: On to your next point. To start with, please do not try to use gitlab.com as a covert C2 channel. I'd have to read through the terms to find how many that breaks, but I imagine a few. I will say, GitLab can be self-managed, and there are some amazing things you can do with CI jobs and the \"GitLab Runner\" agent.\n    \n**Greg Johnson**: GitLab is used in very creative ways to manage all kinds of projects and while we don’t want to discourage creative uses, we also don’t want it to impact other users etc.  
We look at abuse scenarios as well to help us improve our detection capabilities and defenses.\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> How do you address conflict in your team? Is it something that’s encouraged and if you have a diverse set of skills then differences in opinion stand to exist correct?\n{: #question}\n    \n**Chris Moberly**: I think we often have different ideas on how to approach things, but personally I've never felt that tread into the territory of \"conflict\". Because we are a small team (1x manager, 3x engineers) that is spread across time zones, we do a lot of work asynchronously. I think this setup actually has some built-in ways to work through differences in opinion. For example, instead of just bouncing ideas back and forth at the beginning of a project, we'll often take the time to come up with an initial proof-of-concept for an idea before sharing. If someone has a different take on it, it might take too long to simply say \"I think we should do x instead\", as we'd have to cycle through a day or two to get everyone to chime in. So, instead, that person will also come up with a proof-of-concept (PoC) for their idea. At this point, we have several working methods to compare and choose from - or, often we will discover while working on a new PoC that maybe the original idea was best after all.\n    \n**Fred Loudet**: On top of what Chris said, there is also the human factor and I think we are lucky no one in the team is particularly stubborn or has a strong ego 😉! I don’t recall that we’ve had real“conflicts”,  just different ideas but so far (crossing fingers!), we’ve managed to discuss in a non conflicting manner and chose what looked like the best solution to all of us. 
The Gitlab handbook even has a section regarding [conflict](https://about.gitlab.com/handbook/leadership/managing-conflict/).\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> In terms of the make-up of your team, is diversity in gender, background and race something that’s important and a factor in your team when considering the candidates, or do you find yourself picking from the same pool of candidates?\n{: #question}\n    \n**Steve Manzuik**: One of the advantages of GitLab being an all remote company is the fact that we can literally hire a candidate from anywhere in the world. Having this huge talent pool to pick from means that we can absolutely focus on diversity for our teams. Today, as you may have noticed from the AMA our team is not all that diverse when it comes to gender and race. However, we do have a diverse set of experiences to bring to the table. We of course want to become much more diverse in all of the other areas and will consider these factors as we grow the team. In addition, it’s worth checking out this blog post, [“What it's like to work in Security at GitLab”](https://about.gitlab.com/blog/whats-it-like-to-work-security-at-gitlab/) from [Heather Simpson](https://gitlab.com/heather) on our security team that highlights other team members across security and our efforts to build a diverse team. \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> Does Gitlab as a company and overall executive management, understand the value the Red Team brings to the success of the company and how do you communicate the impact/successes of your Red Team activities? 
In some organisations, the security team is considered a cost to the business and a necessary evil but that’s about it.\n{: #question}\n    \n**Steve Manzuik**: In almost an overwhelming way our executives are always very interested in what our Red Team is up to. We find ourselves to be very lucky to have the support from my direct manager, his manager and then our executive team all the way up to our CEO. I think for GitLab it helps that everyone in that chain is technical and understands not only the value that we can bring but also that we can help reduce risk. That doesn’t mean that we have free reign though, we alway make sure that we communicate what we want to do and why we want to do it. Before any exercise begins we have already built a skeleton methodology / approach and defined what it is that we are trying to accomplish and why that matters to the company. When we hit roadblocks or snags we are quick to communicate those as well. GitLab’s [value of transparency](https://handbook.gitlab.com/handbook/values/#transparency) really helps us out here a lot.\n    \n    \n#### \u003Ci class=\"fas fa-question-circle\" style=\"color:rgb(252,109,38); font-size:.85em\" aria-hidden=\"true\">\u003C/i> With regards to career growth, how supportive has Gitlab been to the different members on the team and the different career paths they want to take which may be non-traditional?\n{: #question}\n    \n**Chris Moberly**: GitLab has a great [handbook entry on career growth](/handbook/people-group/learning-and-development/career-development/), it's worth a read. One of the things I really like about GitLab is that the desire to remain technical doesn't result in an early career dead-end. For starters, there are individual-contributor roles beyond \"Senior\" that allow one to continue progressing without taking on a management position. 
Next, there is a HUGE focus on taking time for learning and development; I try to spend most Fridays focused on taking online courses, reading books, and doing research that could be leveraged by the team. Beyond that, every other group at GitLab is always extremely helpful when it comes to knowledge sharing. So, I make sure to spend time with our friends on the Blue Team ([SIRT](/handbook/security/#sirt",[720,1578,9],{"slug":7191,"featured":6,"template":680},"you-asked-and-our-red-team-answered","content:en-us:blog:you-asked-and-our-red-team-answered.yml","You Asked And Our Red Team Answered","en-us/blog/you-asked-and-our-red-team-answered.yml","en-us/blog/you-asked-and-our-red-team-answered",{"_path":7197,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7198,"content":7204,"config":7209,"_id":7211,"_type":14,"title":7212,"_source":16,"_file":7213,"_stem":7214,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring",{"title":7199,"description":7200,"ogTitle":7199,"ogDescription":7200,"noIndex":6,"ogImage":7201,"ogUrl":7202,"ogSiteName":667,"ogType":668,"canonicalUrls":7202,"schema":7203},"Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring","How we’re defining and aligning data zones in our Zero Trust implementation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680808/Blog/Hero%20Images/fabio-oyXis2kALVg-unsplash.png","https://about.gitlab.com/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-09-06\",\n      
}",{"title":7199,"description":7200,"authors":7205,"heroImage":7201,"date":7206,"body":7207,"category":720,"tags":7208},[1574],"2019-09-06","\n\nUpdate: This is part 4 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next post: [Zero Trust at GitLab: Implementation challenges (and a few solutions)](/blog/zero-trust-at-gitlab-implementation-challenges/).\n{: .alert .alert-info .note}\n\n\nZero Trust is the practice of shifting access control from the perimeter of the organization to the individuals, the assets, and the endpoints. For GitLab, Zero Trust means that all devices trying to access an endpoint or asset within our GitLab environment will need to authenticate and be authorized. This is part four of a multi-part series.\n\n* Part one: [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)\n* Part two: [Zero Trust at GitLab: problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges/)\n* Part three: [Zero Trust at GitLab: The data classification and infrastructure challenge](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/)\n\n\nIn previous blog posts we’ve covered both the history of the whole Zero Trust Networking (ZTN) scenario, and some of the areas where we expect challenges to successful implementation. In this post we’ll discuss the concept of “data zones” as well as an “authentication scoring system.” The general concept of data zones is not new; it is an established part of a layered security approach where zones of trust are created around groups of data, usually on the same network segment or system. 
A few things to note:\n* Our data zone concept simply groups the data according to access controls available for a system when granular control is not possible.\n* Our authentication scoring system is intended to augment our ability to allow access.\n* We’ve set up all of our access based upon the team member’s identity and job description, but it should also include information about the device and even the geographic location of the team member (as we shall see later).\n\n## Defining data zones\nWe have previously defined the [classification of data](/handbook/security/data-classification-standard.html) to include RED, ORANGE, YELLOW, and GREEN. We’ve also touched on the concept of [moving data either via automated or manual means, and data being transformed](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/). Where the data is stored should reflect the classification.\n\nThe immediate challenge with regards to data access is when data that is considered RED or ORANGE is stored on a system that has limited access controls, and granting granular access isn’t possible. In other words, we need to define zones where multiple classes of data may reside on a system that is unable to provide separation of access controls based upon our own data classification. The most common scenario will be either a legacy system or a system developed outside of our control, such as a SaaS company offering.\n\nWe’ve defined four zones that match the data classifications, and named them after the colors of the data classification:\n\n* RED ZONE for RED and lower data\n* ORANGE ZONE for ORANGE and lower data\n* YELLOW ZONE for YELLOW and lower data\n* GREEN ZONE for GREEN (this may not be needed as it is the lowest classification)\n\nIn general, the zones apply to data at rest. 
Data in transit, either transitioning in system memory between subsystems or transferred over a network between systems, defaults to RED ZONE since access at that level is considered critical. Therefore the ability to access systems at a low enough level to examine RAM or monitoring calls between systems is definitely considered the highest level of restriction, and data moving between systems is subject to the highest levels of encryption.\n\n### Here are the basic rules for a zone:\n\n* A zone can contain its own “color” of data or lower, not higher.\n* A zone will not allow access to a lower “color” of data within its boundaries without authorization to access the highest designation of “color” for that zone.\n* The boundaries of a zone are defined by the access controls specific to that zone.\n\nTo illustrate, if a YELLOW ZONE is set up to contain YELLOW data, it cannot contain RED or ORANGE. And while it can contain GREEN data, team members with access to GREEN cannot access that data while it resides in the YELLOW ZONE. **In essence, each zone where data resides must have controls that consider what data they might possibly contain.**\n\nTo explain this further, let’s say that there is a database that contains ORANGE and YELLOW data within it, but the controls in place are not granular – access to the database means access to all of the data contained within it. Therefore this database would be considered ORANGE ZONE, and those with access to only YELLOW data could not be allowed access that data in this database because it is in ORANGE ZONE.\n\n## Authentication scoring\nThere will be a scoring system for access to data, where a team member is ranked. 
This isn’t so much an actual system for points, but more of a reference guide on what it takes to be able to access different data.\n\n![Authentication scoring reference guide](https://about.gitlab.com/images/blogimages/authentication-scoring.png){: .shadow.small.center}\n\n### The earning of points is as follows:\n\n#### Basic access\n\nOne point for basic authentication. This gets you access to the GREEN ZONE and GREEN data.\n\n#### Basic access with U2F\n\nOne point if second factor authentication comes through the proper channel (for GitLab team members that is Okta with approved MFA, such as U2F). Two points are required to access the YELLOW ZONE and YELLOW data, so this, coupled with the previous one point for authentication, allows the access.\n\n#### Managed device\n\nOne more point if the authentication comes via a managed device (a device GitLab has issued to the team member). This is sufficient to allow access to ORANGE ZONE and ORANGE data.\n\n#### Healthy managed device\n\nIf the managed device is in proper health (passes checks for patches, proper configuration, etc) an additional point is given, which allows access to the RED ZONE and RED data. This is not to imply that we will allow “unhealthy devices” to access ORANGE data (for example), but that the requirement is strictly enforced for RED ZONE and RED data.\n\n#### Geolocation\n\nA final point is given for a managed device with proper health from proper geolocation (this is dependent on the particular RED data being accessed). There may be a requirement that specific data can only be accessed from specific countries, and this is to account for that.\n\n## A summary and what’s next\nIt should be apparent at this point we have a fairly complex situation to administer. We do protect our data but we want more granular control over the access to the data. 
In a lot of organizations, administrators will end up denying access to parts of their system to employees, and end up having to export portions of the data to those denied access. Additionally, sometimes administrators will grant too much access to employees who simply need to access small segments and do not need the full access.\n\n**At GitLab we not only want to avoid that, but we want to document, log, and automate as much of the granular control as possible. This makes other challenges such as onboarding, offboarding, provisioning of access devices, auditing, and other processes much easier. And if it is easier on both the team member and the administrators managing the systems, full adoption is much simpler. The last thing GitLab wants to do is to prevent or curtail the rapid growth we are experiencing.**\n\nDesigning data zones and coming up with an authentication method to gain access to the data in its zone will help clarify how we want to approach some of the challenges. We have a decent start, but to fully explain how they will need to be applied, we’ll go into a lot more detail in the next post. 
We’ll also discuss some specific ways to address challenges involving our infrastructure, including the role of managed devices.\n\n*Special shout-out to the entire security team for their input on this blog series.*\n\nPhoto by [fabio](https://unsplash.com/@fabioha?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,720,2057],{"slug":7210,"featured":6,"template":680},"zero-trust-at-gitlab-data-zones-and-authentication-scoring","content:en-us:blog:zero-trust-at-gitlab-data-zones-and-authentication-scoring.yml","Zero Trust At Gitlab Data Zones And Authentication Scoring","en-us/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring.yml","en-us/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring",{"_path":7216,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7217,"content":7223,"config":7228,"_id":7230,"_type":14,"title":7231,"_source":16,"_file":7232,"_stem":7233,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-implementation-challenges",{"title":7218,"description":7219,"ogTitle":7218,"ogDescription":7219,"noIndex":6,"ogImage":7220,"ogUrl":7221,"ogSiteName":667,"ogType":668,"canonicalUrls":7221,"schema":7222},"Zero Trust at GitLab: Implementation challenges (and a few solutions)","Implementing change in an already working environment always brings its fair share of growing pains. 
What happens when that change is Zero Trust?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665982/Blog/Hero%20Images/jpvalery-9pLx0sLli4unsplash.jpg","https://about.gitlab.com/blog/zero-trust-at-gitlab-implementation-challenges","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: Implementation challenges (and a few solutions)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-10-02\",\n      }",{"title":7218,"description":7219,"authors":7224,"heroImage":7220,"date":7225,"body":7226,"category":720,"tags":7227},[1574],"2019-10-02","\n\nUpdate: This is part 5 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next and final post in this series: [Zero Trust at GitLab: Where do we go from here?](/blog/zero-trust-at-gitlab-where-do-we-go-from-here/).\n{: .alert .alert-info .note}\n\n\n*Zero Trust is the practice of shifting access control from the network perimeter to the assets, individuals, and the respective endpoints. For GitLab, Zero Trust means that all users and devices trying to access an endpoint or asset within our GitLab environment will need to authenticate and be authorized. 
This is part five of a multi-part series.*\n\nCheck out these other posts to get up to speed:\n* Part one: [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)\n* Part two: [Zero Trust at GitLab: Problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges)\n* Part three: [Zero Trust at GitLab: The data classification and infrastructure challenge](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/)\n* Part four: [Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring](/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring/)\n\nAs with most things at GitLab, we’re taking a very open approach to implementing Zero Trust. We’ve tackled everything from the evolution of Zero Trust to the expected challenges and our planned work-arounds.  However, maybe we haven’t yet addressed a ZTN related topic, question or consideration that you’re interested in discussing.\n\nWe’ve been discussing how Zero Trust Networking (ZTN) presents GitLab with a series of challenges, and have suggested a few mitigation strategies. In order to fully understand some of these challenges and how to approach them we’ll need to drop a bit deeper into the details.\n\n## Immediate Challenges\n\nThe first major hurdle that comes with discussing Zero Trust Networking (ZTN) is a classic one: Getting the plan implemented. Any security professional who has tried to implement changes to an already working environment has experienced these growing pains. GitLab is an extremely forward-thinking organization and we're constantly implementing massive changes to our software. But this still doesn’t mean everyone welcomes every change with open arms.\n\nCurrently, things work. We have an environment that is remarkably stable and pretty secure despite all the changes. When the security department starts rumbling about certain types of changes, there is resistance. 
So we have to look at things a bit differently to get some of our ideas implemented. How do we do this?\n\n## Real Problems\nWe’ve previously discussed areas where we anticipated problems, but what we really need to do is look at existing problems and work out solutions. If we can get some hash marks in the “win” column for ZTN, it helps prove that ZTN is a rational approach for security. If we can solve some pressing problems along the way (or make older, less robust solutions better) it improves the general appeal for ZTN. It is one thing to expect resistance, it is another to encounter it. Security changes need to make things easier for the end-user, otherwise, the end-user will fight and try to bypass what are perceived as roadblocks to productivity. We can’t make every single person happy, but we can try to make as many users as happy as possible while making every single person a bit safer. That said, we encountered a bit of resistance in a few areas.\n\n### User identity\nIn the past, we’ve had issues with provisioning user accounts – we’d need to get a team member set up in all of the systems as quickly as possible. When the entire company had 35 people this was not that great of a burden. But right now the Security Department alone has 35+ people (and counting, we’re [hiring](/jobs/), hint hint) and we’ve had times where 35 people started at GitLab in a single week.\n\nWe’re growing! Any time we make changes to the process of user-identity, we have to keep in mind that most of the departments are more concerned with provisioning new users than actual user identity. Their main goal is to get new team members productive as quickly as possible, so access to systems immediately is crucial. Ideally, any solution for user-identity should work seamlessly with the entire user lifecycle – provisioning through deprovisioning – without disruption to company productivity.\n\n### Device management\nWe have issues with both device identification and device management. 
We need to strike a balance between ensuring team members have access to the tools they need to perform their jobs, and simply allowing team members to use whatever computing device they want to complete tasks and maintain productivity. Interestingly, this is one area where GitLab’s distinctive background as a company has created a rather unique challenge. We started as an open source project and only in the last couple of years have we been purchasing laptops for team members (for years it was [BYOD](https://en.wikipedia.org/wiki/Bring_your_own_device)). To help in this area, we’ve [standardized](/handbook/business-technology/team-member-enablement/onboarding-access-requests/#laptops) what the company will purchase for new team members (and older team members are certainly eligible for new systems). Having a standard platform is great. Our issue here is both a cultural one as well as a practical one.\n\nSince our roots are in BYOD, we cannot simply turn off BYOD overnight. In fact, I see a lot of benefits to BYOD in certain scenarios – typing up blog posts on a tablet in a coffee shop seems fine, code commits to critical systems are not. Anyone can contribute – that is a cultural core belief and our [mission](/company/mission/#mission) at GitLab. We have team members as well as non-employees that contribute to our code base, our handbook, and everything else we do. We don’t have some of the normal corporate standardization that a typical brick-and-mortar company might have, such as using the corporate-issued-laptop only with asset tracking and patch management built-in, forbidding the use of BYOD, and so on. 
We do have policies in place, but they are not proactively enforced because we lack the asset management solutions at the moment to do so at the level we desire.\n\nAs a security professional, I am thrilled we have standardized on Linux as our main infrastructure platform, Macs for team members, and engineering team members have a choice of Mac or Linux for the work laptop. No Microsoft Windows.\n\nHowever, trying to find a solution for asset management for our chosen platforms is a challenge. Most vendor solutions are Windows and Mac or Windows and Linux. There are some vendor solutions that support both Mac and Linux, but are often the more “Windows and Mac, and well, sort of Linux if you only run this ancient binary that dates back to an acquisition three years ago, I think Alice is still here from that acquisi- no wait she left” flavor.\n\nI haven’t even discussed phones. These are commonly used for various methods of multi-factor authentication, although we don’t currently have a good way to ensure the phones used for MFA are secured and fully patched. And many team members access work applications on their phones – email, Slack, Zoom, and Expensify, to name a few.\n\n### Sprawling infrastructure\nTo complicate things we have hundreds of servers/containers/instances on numerous cloud offerings spread around the world, and dozens of cloud-based “Something-or-other as a Service” offerings we use as a company. While we don’t administer all systems via [SSH](https://en.wikipedia.org/wiki/Secure_Shell) ([Chef](https://www.chef.io/) and [Knife](https://docs.chef.io/knife.html) are used heavily in our environment) there are still challenges with provisioning, and that we’re currently unable to enforce two factor for SSH. 
Yes, we can use Yubikeys to store keys and a few other tricks for SSH access, but getting things set up for team members to administer these systems is daunting.\n\n## Wins\nA lot of our problems with identity management at GitLab were solved by [implementing Okta](/handbook/business-technology/okta/#how-is-gitlab-using-okta), and entire departments were thrilled. Provisioning steps that took days had been reduced to minutes. [Okta](https://en.wikipedia.org/wiki/Okta_(identity_management)) has a number of features that supported our vision of ZTN, so if we can solve some ZTN problems with Okta, we’re doing it with a proven solution that people already use. If we can solve a problem with Okta it will be a much easier “sell” to the various impacted departments, and since we can implement a lot of our ZTN model with Okta, it is a win-win situation.\n\nWhile the Security Team felt that a number of security problems were solved with Okta, this was not how the product was “sold” to the rest of the company. The ZTN benefits were pitched as business solutions to existing business problems to the various business and application owners in GitLab – meeting provisioning and compliance needs. It was not sold as a security solution, and this approach worked well.\n\nOur use of Chef along with Knife has been a massive help with infrastructure changes, and has simplified a lot of the usual drudgery associated with system administration. For example, pushing code changes out to multiple systems is much simpler.\n\n_Can we apply any of the wins to our existing challenges?_\n\n### Enforcing Okta Everywhere\n\nBy trying to get the numerous SaaS solutions we use to only be accessed via Okta, we are looking to solve 70% (a WAG at best) of our issues in the SaaS area. This does not address everything. Some of the access to these systems requires not just traditional web-based access but API access as well. 
Not all systems integrate with Okta, or API access is completely separate, but this approach is working so far and things have gone reasonably well where Okta is implemented.\n\n### Linux-based Infrastructure\nIn the sprawling [infrastructure](/handbook/engineering/infrastructure/) arena, our greatest challenge is that some of our most critical assets are administered via SSH. As a result, we have issues with provisioning, deprovisioning, and enforcing other aspects of authentication that we take for granted with web-based assets. We are seriously looking at leveraging Okta and their [Advanced Server Access](https://www.okta.com/products/advanced-server-access/) (ASA) product, which looks like we could integrate SSH accounts into the Okta mix. Using ASA could allow for provisioning of a new administrator via group assignment. Since we get multi-factor, enforcement of GeoIP, and a few other bells and whistles via Okta, by using ASA we could resolve one of the hardest problems we currently face. This has the added benefit of making the compliance and auditing folk happy, to say nothing of just general time-savings.\n\nWhile ASA (and any similar product) requires we install software on the server side, we do have Chef and Knife to help with deployment. Rollout could happen quickly. Our main issues here would be performance impact and client software distribution, although a regulated testing period and a decent rollout plan could help solve those issues.\n\n### All those devices\nThis one is ugly. While moving more and more systems into Okta helps, it also emphasizes our biggest weakness – device management. After importing Okta logs into other systems for analysis, we can see what our team members are using to access GitLab assets. The good news is the majority of team members are using company-issued laptops, although we are not sure what patch level or configurations are in place. 
We do have company standards, but we do not have the level of control we’d like to ensure these standards are met. For example, we’d like to ensure that all team member systems accessing critical information ([RED data](/handbook/security/data-classification-standard.html#data-classification-levels)) are doing it from a company-issued system that is up-to-date on patches and is properly configured. We’d prefer to do it at the time of authentication, and not after the fact via log mining.\n\nPhones are already a touchy subject, since this is the main BYOD device allowed on most corporate networks. We use Expensify, and I cannot imagine using it without the phone app even though it is possible. I love using Okta Verify on my phone, and approving push multi-factor from my Apple Watch. I know Okta has some potential solutions, but unless there is a solution from any vendor that is BYOD friendly instead of full takeover MDM, I can’t imagine successfully selling it to fellow team members.\n\nThe main issue here is that device management is an important part of ZTN, and the tools to make this happen at the quality level we’d like don’t seem to exist. As stated earlier, we have a mixture of Mac and Linux desktops so we’d like one solution to make this happen, not two.\n\n## Conclusion\nWe did not intend for this blog post to be an Okta commercial, but it does happen to meet our needs for part of this whole ZTN equation. We’re still searching for a solution for asset management. I wish I could claim this was not a commercial for an asset management solution, because that is quite the challenge.\n\nWhile we still have a long way to go, we have a better handle on direction. Our points of resistance – both from team members impacted by change and technological limits based upon our environment – are showing where we need to focus but also how we need to approach things. 
Finding cultural and technological areas where we are doing well are huge strengths we can leverage, and by focusing our efforts on those areas, more of our environment benefits from Zero Trust.\n\nIf you’re implementing ZTN and have similar (or different) problems, I’d love to discuss. If you’ve got thoughts or a question, comment below, we’d love to hear from you.\n\n\n*Special shout-out to the entire security team for their input on this blog series.*\n\nPhoto by [Jp Valery](https://unsplash.com/@jpvalery?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n",[9,720,2057],{"slug":7229,"featured":6,"template":680},"zero-trust-at-gitlab-implementation-challenges","content:en-us:blog:zero-trust-at-gitlab-implementation-challenges.yml","Zero Trust At Gitlab Implementation Challenges","en-us/blog/zero-trust-at-gitlab-implementation-challenges.yml","en-us/blog/zero-trust-at-gitlab-implementation-challenges",{"_path":7235,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7236,"content":7242,"config":7247,"_id":7249,"_type":14,"title":7250,"_source":16,"_file":7251,"_stem":7252,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-problems-goals-challenges",{"title":7237,"description":7238,"ogTitle":7237,"ogDescription":7238,"noIndex":6,"ogImage":7239,"ogUrl":7240,"ogSiteName":667,"ogType":668,"canonicalUrls":7240,"schema":7241},"Zero Trust at GitLab: Problems, goals, and coming challenges","We map out our Zero Trust goals, the challenges we expect to encounter along the way, and how we plan to address them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680704/Blog/Hero%20Images/beasty-ztblog-unsplash.jpg","https://about.gitlab.com/blog/zero-trust-at-gitlab-problems-goals-challenges","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero 
Trust at GitLab: Problems, goals, and coming challenges\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-08-09\",\n      }",{"title":7237,"description":7238,"authors":7243,"heroImage":7239,"date":7244,"body":7245,"category":720,"tags":7246},[1574],"2019-08-09","\n\nUpdate: This is part 2 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next post: [Zero Trust at GitLab: The data classification and infrastructure challenge](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/).\n{: .alert .alert-info .note}\n\n\n_Zero Trust is the practice of shifting access control from the perimeter of the organization to the\nindividuals, the assets and the endpoints. For GitLab, Zero Trust means that all devices trying\nto access an endpoint or asset within our GitLab environment will need to authenticate and\nbe authorized. This is part two of a multi-part series. Read the first post, [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)._\n\nThe benefits of Zero Trust Networking (ZTN) on paper are ideal and many rush to\nimplement it with no idea exactly what it is (or how to get there).\nWe [previously discussed the evolution of ZTN](/blog/evolution-of-zero-trust/)\nand have been working on ways to implement things since then.\n\nVendors have appeared left and right, trying to sell their own versions of ZTN, but curiously no\ntwo seem to have the same interpretation. All of them say\nthey have a product that does ZTN and it is “easy to use,” but of course if you nail them down on\nthe implementation they all seem to fall short in one area or another.\n\n## Defining the problem\n\nI alluded to this above, but let’s clearly restate the problem – how does a diverse company\nimplement Zero Trust when no one can seem to agree on any of it? 
The most common issue for those\ntrying to roll out a ZTN is that, to be successful, everything must work together.\nThe basic principles of Zero Trust – positively identify the user, positively identify\nthe device accessed as managed and secure, ensure the user and the device\nare allowed to get into the asset they are trying to access, and do all of this in real time –\nare somehow very hard for most people to understand.\n\nWe probably should have clearly stated in our last blog post that the biggest problem with\n[the BeyondCorp solution](https://cloud.google.com/beyondcorp/) outlined by Google is that it is\ntailored for Google. This series\nof blog posts will take a closer look at the issues we are trying to solve, along with our\nworking approach. It is tailored for GitLab and won’t necessarily work everywhere else.\nHopefully it will provide insight into our thought process, encourage some discussion, and\npossibly help others along their own paths.\n\n## What we want\n\nImplementing ZTN at GitLab involves determining what we want out of this, which is really an\nextension of our objectives for [GitLab security](/handbook/security/#security-vision)\nin general. What we want to do is the following:\n\n- **Protect the data that needs to be protected.** Different types of data need to be protected at\ndifferent levels, so we must be able to have that flexibility.\n- **Positive team member identification.** When a team member authenticates, we need to know it\ntruly is that team member, and we need to know what the allowable data is. This needs\nto happen in real time.\n- **Positive device identification.** We need to identify the authenticated team member’s access\ndevice, and based upon the level of trust associated with that device, determine whether the\ndevice is allowed to access particular data, regardless of team member identity. 
This needs to\nhappen in real time.\n- **Geo-location identification.** We need to identify the team member’s location while at work\nand restrict access to certain data based upon team member geolocation. This\nneeds to happen in real time.\n- **Automated access.** We need to subject all automated processes that access data to the\nsame data protection policies as team members and devices. Again, this needs to happen in real time.\n- **Logging.** We need to properly log all transactions for auditing and monitoring purposes.\n- **No weakening of existing controls.** Data must be protected at rest and in transit. Any\nand all solutions should not detract from this.\n- **Security should make things easier, not harder.** If we do this correctly, the process\nwill be streamlined. Team members in general should be able to do their jobs effectively and\nquickly. Security should be so streamlined that the process is not cumbersome, as this tends\nto inspire some team members to try and bypass it.\n\n## Expected challenge areas\n\nWe’ve laid out what we want to do, and it was pleasant to discover in quite a few cases\nwe are already doing just that. We just lacked either the real-time component or we\nsimply had some type of inconvenient workaround to protect data that inhibited team members\nfrom doing their jobs easily. Based upon that knowledge and what we want to achieve, we’ve\ndevised a list of potentially challenging areas we may encounter:\n\n### Our network\n\nWe are a company that has no perimeter to speak of, as all team members are\nremote. In a way this is good, since [we don’t have a corporate\nVPN](/handbook/security/#why-we-dont-have-a-corporate-vpn) and therefore don't have to\nface dismantling it. 
But we do have to ensure that we maintain some semblance of control so we\nare assured that as a team member authenticates, it is done in a safe way and is independent\nof the network they are using.\n\n### Our apps and our data\n\nWe use a number of products, including our own DevOps\noffering. While we can control our own product and alter it to better serve our needs\n(and subsequently release said changes to our customers for their own needs), this does not\naddress the public cloud offerings our infrastructure is based on and that we use on\na regular basis: GCP, AWS, Azure, and Digital Ocean. Nor does it consider the variety of\ncontrols (and their variations) used to administer and secure these platforms. Solutions\nthat present themselves as working just fine on one cloud offering may not work the same\n(or at all) on another platform, which causes its own challenges. It also does not address\nadditional services such as Slack, Expensify, BambooHR, Zendesk, and others that contain\ndata we have to protect. Therefore, we need to extend our protection to cover our\ndata no matter where it resides.\n\n### It’s not just us\n\nWe sell services to customers including private groups and projects\non GitLab.com that need protecting from the public and whose access is restricted from\nus on a need-to-know basis.\n\n### Scaling\n\nWe’re growing at a rather accelerated rate, in terms of both customers and team\nmembers. All solutions to problems have to scale, including security solutions such as ZTN.\n\n### Our customers are global\n\nThere are contractual obligations, as well as regulatory\nand compliance issues, across the globe for our customers that need to be observed.\n\n### Our team members are also global\n\nThere are unique issues across our diverse team\nmember base, residing in more than a quarter of all countries on the planet. 
Each country has its\nown regulations, standards, and needs.\n\n## Coming next\n\nWe think some of these challenge areas might look familiar to many of you, and while we\nhope this post has been useful we're definitely heading into some deep and murky water going forward!\nIn the next post, we’ll take a dive into the deep end of this far-from-straightforward issue in to one of the more\nchallenging areas: our data and the infrastructure it resides upon.\n\n*Special shout-out to the entire security team for their input on this entire blog series.*\n\nPhoto by [beasty](https://unsplash.com/@beastydesign?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,720,2057],{"slug":7248,"featured":6,"template":680},"zero-trust-at-gitlab-problems-goals-challenges","content:en-us:blog:zero-trust-at-gitlab-problems-goals-challenges.yml","Zero Trust At Gitlab Problems Goals Challenges","en-us/blog/zero-trust-at-gitlab-problems-goals-challenges.yml","en-us/blog/zero-trust-at-gitlab-problems-goals-challenges",{"_path":7254,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7255,"content":7261,"config":7266,"_id":7268,"_type":14,"title":7269,"_source":16,"_file":7270,"_stem":7271,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge",{"title":7256,"description":7257,"ogTitle":7256,"ogDescription":7257,"noIndex":6,"ogImage":7258,"ogUrl":7259,"ogSiteName":667,"ogType":668,"canonicalUrls":7259,"schema":7260},"Zero Trust at GitLab: The data classification and infrastructure challenge","The classification of data is a huge step in the right direction when it comes to handling Zero Trust, but it comes with its own set of 
challenges.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679055/Blog/Hero%20Images/close-up-colorful-colors-40799.jpg","https://about.gitlab.com/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: The data classification and infrastructure challenge\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-08-21\",\n      }",{"title":7256,"description":7257,"authors":7262,"heroImage":7258,"date":7263,"body":7264,"category":720,"tags":7265},[1574],"2019-08-21","\nUpdate: This is part 3 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next post: [Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring](/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring/).\n{: .alert .alert-info .note}\n\nZero Trust is the practice of shifting access control from the perimeter of the org to the individuals, the assets, and the endpoints. For GitLab, Zero Trust means that all devices trying to access an endpoint or asset within our GitLab environment will need to authenticate and be authorized. This is part three of a multi-part series.\n\nCheck out these other posts to get up to speed:\n* Part one: [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)\n* Part two: [Zero Trust at GitLab: Problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges)\n\nOne of the main objectives for the Security team at GitLab is to protect data, regardless of whether it is our customer data or employee data. Instead of simply viewing Zero Trust Networking (ZTN) as some type of solution for authentication, we also look at it as a way to further our data protection. 
This poses specific challenges for both the data and the infrastructure the data resides upon.\n\n### Dealing with data classification\n\nWe’ve established a [classification of data policy](/handbook/security/data-classification-standard.html) at GitLab, so we understand the protections necessary. The emphasis of the data classification policy is to define mapping between access controls and data, where the level of sensitivity of the data can appropriately be protected. To help with the understanding and to allow for quicker identification, the four data classification levels are mapped to a color coding. The color codings are `RED`, `ORANGE`, `YELLOW`, and `GREEN` – with `RED` being the most sensitive data, down to `GREEN` being public data.\n\nThis classification of data is a huge step in the right direction when it comes to handling ZTN. That being said, when it comes to data classification there are a few areas where we anticipate challenges with regards to ZTN:\n\n* **The state of data changes over time.** Data that is in one classification may change over time based upon any number of factors. An example is `ORANGE` sales data for a public company – if disclosed before a certain date this could lead to insider trading. However after a certain date the sales data would become public, or `GREEN` data.\n\n* **Tracking of data/metadata.** The tracking of data and its metadata, including origin and classification that define requirements for handling, is non-trivial. Applying labels (data classification labels, not to be confused with the [labeling done within the GitLab software itself](https://docs.gitlab.com/ee/user/project/labels.html)) to data helps in enabling control of the data. 
These labels can be related to the data’s function as well as conditional access controls needed.\n     * For example, a US DoD instance of GitLab might require certain data labels such as “US citizen,” “on US soil when accessing,” “part of the US DoD project team,” and “GitLab team member not a contractor” in addition to other more standard restrictions. It is notable that the process of data labeling could be beneficial to meeting compliance standards as well, e.g. GDPR article 15 removal requests.\n\n* **Time limits on certain data.** Applying data classification labels to data will require time limits. In the above example, the label is “part of the US DoD project team,” and access to this data may expire after 30 days and need to be removed/re-applied for/auto-extended under certain circumstances, etc.\n\n* **Capability of data.** Departmental data collected might be subject to classification based upon what it is capable of instead of what it actually seems to be (think Tenable scanning data). The same would apply to customer-generated data, such as ZenDesk tickets. Basically, because we cannot control what a customer might say or what a security scan might find. It is possible that someone could have access to a system or even manage parts of that system, yet should not be able to see all of its data.\n\n* **Movement of data.** Departmental data that is transferred between systems, either automated or manual, could affect the classification of itself or the surrounding data, especially if the data is transformed or cleansed in some way. For example, situations and potential security problems reported via ZenDesk or HackerOne are often transferred to GitLab.com so we can “work the issue.” These are often sanitized to a degree. Here is a more detailed example to illustrate this:\n     * If the XYZ corporation reports a problem which is entered into ZenDesk, an issue is created for the Security team to work to resolution, and the data is in essence transformed. 
If the problem is authentication bypass using the APIs and it affects all customers on GitLab.com, only the mechanics of the bypass itself are considered relevant, and the fact that XYZ corporation reported it is not important to the resolution process. Therefore, XYZ corporation can be scrubbed from the Security team’s issue (and should be). As the original issue impacted XYZ corporation, it might have been considered `ORANGE` data impact, but the real impact affects more than one customer, so the problem is considered an impact to `RED` data. After a patch and resolution of the problem, we make the details of the situation public and include vulnerability, patch, and resolution information. We state it was reported to us by “a customer.” Association with XYZ corporation would still be `ORANGE` data. However, the previous `RED` classification of the problem itself is now considered `GREEN` since the problem is resolved and we have made the problem and its solution public.\n\nAs you can see, on the surface there seems to be no problem with securing our data with the assistance of ZTN, but once you start to explore \"edge cases\" one begins to reach the conclusion that these are not actually edge cases, but working examples of how we interact with our data. In most examples, this will not be a problem as we have granular control over our data, but when it comes to ZTN we need to make sure we consider the changing state of our data. The main thing we wish to avoid is an authentication decision being made based upon a particular classification of data on a system when the classification of that data is known to change over time.\n\nGranular data access is typically controlled at the system level, so we should be just fine. A closer look at our infrastructure may indicate otherwise, so a more detailed examination is required.\n\n### The infrastructure\nThe infrastructure needs to be defined, including some semblance of where the data resides and how it is accessed. 
For the systems we directly manage and control down to the very lowest level, we have a good grasp on what we have to work with and what controls are available to regulate access to the data they contain. However, a decent part of our infrastructure resides on systems we do not fully control.\n\nIn the modern cloud age, the rise of [software as a service](https://en.wikipedia.org/wiki/Software_as_a_service) (SaaS) applications has become an important part of everyday business operations. Instead of maintaining servers in a server room, a vendor uses the cloud and makes the application accessible over the internet. Each company has their own private set of data maintained by the SaaS provider, and may have different levels of features based upon price that allow them to manipulate and control the data. Examples include Expensify for handling expenses, BambooHR for handling HR functions, and so on. GitLab is no exception to this process. Deployment is often as easy as setting up accounts, and while we’re [working to unify our authentication process under Okta](/handbook/business-technology/okta/#how-is-gitlab-using-okta), it is still not fully deployed.\n\nAs we are an all-remote company, our infrastructure is all-remote. We do the bulk of our company activity inside the GitLab.com software itself, but we also use roughly two dozen SaaS companies’ offerings as well. There are the usual suspects such as Slack and Zoom, but as mentioned we are currently using Expensify, BambooHR, ZenDesk, and many others.\n\nSimply put, our infrastructure poses some unique challenges:\n\n* **Cloud controls.** We are a GCP organization. Also AWS. And Azure. Did I mention DigitalOcean as well? As one might expect, this can create challenges if one has to use parts of the underlying cloud controls to help with authentication and enforcement of access controls, and software components are being moved from platform to platform. 
Customers don’t notice, but team members handling administrative access might.\n* **Who controls what?** This is not as bad as it sounds, but it is often not 100% clear who has administrative access to different systems. I’d say it is a symptom of a rapidly growing company, but after having experienced the same thing in most companies I’ve worked at, this is a fairly common phenomenon. The problem at GitLab stems from the amount of growth and our own rather unique history: When the company was very small, a single team member might be in control of a piece of infrastructure that slowly scaled up and became huge. Then, if that team member leaves the company, most likely the team member’s department assumes control. Does anyone or everyone have that control now? Does each team member understand all of the data residing in that system? Do they understand that data in relationship to the data classification?\n* **The enforcement of SaaS application privileges.** For systems where we do not have control over the underlying components, enforcing privileges becomes tricky. If a SaaS app has a regular user authentication and the main screen has an “Admin” button to escalate privileges, does our authentication system handle this programmatically?\n\nFortunately we can leverage a number of the [compliance](/handbook/security/security-assurance/security-compliance/) efforts within the company to gain insight into what levels of control we can impose onto each system.\n\n### What's next\n\nIt sure seems like we have a lot of unique challenges! But we do have a huge leg up. For many organizations, the coming of ZTN means the end of the corporate VPN and the falling of huge chunks of the perimeter network. 
[GitLab doesn’t have a corporate VPN](/handbook/security/#why-we-dont-have-a-corporate-vpn) to dismantle, and as we’ve said before [we’re an all-remote company](/company/culture/all-remote/) so there is no perimeter.\n\nWe’ve discussed a lot of challenges, in the next installment of this series we’ll start talking about a few specifics we are designing to help make things easier. If you’re researching, implementing, or considering ZTN, what are the challenges you’re tackling? Tell us in the comments.\n\n*Special shout-out to the entire security team for their input on this blog series.*\n\nPhoto by [Pixabay](https://www.pexels.com/@pixabay) on [Pexels](https://www.pexels.com/photo/red-office-yellow-school-40799/)\n{: .note}\n",[9,720,2057],{"slug":7267,"featured":6,"template":680},"zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge","content:en-us:blog:zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge.yml","Zero Trust At Gitlab The Data Classification And Infrastructure Challenge","en-us/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge.yml","en-us/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge",{"_path":7273,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7274,"content":7280,"config":7285,"_id":7287,"_type":14,"title":7288,"_source":16,"_file":7289,"_stem":7290,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-where-do-we-go-from-here",{"title":7275,"description":7276,"ogTitle":7275,"ogDescription":7276,"noIndex":6,"ogImage":7277,"ogUrl":7278,"ogSiteName":667,"ogType":668,"canonicalUrls":7278,"schema":7279},"Zero Trust at GitLab: Where do we go from here?","We take a look back at how far we've come in our ZTN implementation, and at the progress we still need to 
make.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679704/Blog/Hero%20Images/puria-berenji-Dyi1K2atCRw-unsplash.jpg","https://about.gitlab.com/blog/zero-trust-at-gitlab-where-do-we-go-from-here","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: Where do we go from here?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-10-15\",\n      }",{"title":7275,"description":7276,"authors":7281,"heroImage":7277,"date":7282,"body":7283,"category":720,"tags":7284},[1574],"2019-10-15","\n\n*Zero Trust is the practice of shifting access control from the network perimeter to the assets, individuals, and the respective endpoints. For GitLab, Zero Trust means that all users and devices trying to access an endpoint or asset within our GitLab environment will need to authenticate and be authorized. This is part 6 of 6 in our series.*\n* Part one: [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)\n* Part two: [Zero Trust at GitLab: Problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges)\n* Part three: [Zero Trust at GitLab: The data classification and infrastructure challenge](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/)\n* Part four: [Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring](/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring/)\n* Part five: [Zero Trust at GitLab: Implementation challenges](/blog/zero-trust-at-gitlab-implementation-challenges/)\n\nWe've talked pretty openly about forming our ZTN approach and the challenges we expect along the way – as well as the challenges we've already met. 
If there is an area of ZTN that we've not addressed, or if you're interested in diving deeper into the topic, we invite you to join us October 29, 3-4 pm ET for our [Zero Trust Reddit AMA](https://www.reddit.com/r/netsec/comments/d71p1d/were_a_100_remote_cloudnative_company_and_were/) where you can Ask Us Anything!\n\n## Where we are\nI guess it makes sense to talk about where we are at with this whole ZTN thing. In addition to establishing policies for team members (based upon job descriptions and placement in the org chart), we have classified our data and mapped out our environment so we know where all of the parts are. But there are a few items we want to explain with a bit of detail.\n\n### Getting SaaS\n\nUsing [Okta](https://www.okta.com), we have managed to get (as of this writing) 70 of our [SaaS](https://en.wikipedia.org/wiki/Software_as_a_service) apps under some semblance of control. This “control” has varied heavily – some SaaS apps cleanly and seamlessly integrated with Okta, and some were working kinda-sorta-good-enough to call them integrated. The majority of SaaS integrations work fine as they used [SAML](https://en.wikipedia.org/wiki/Security_Assertion_Markup_Language) and easily integrate in minutes. We can provision and deprovision accounts with simple assignments. Departments like People Ops can do provisioning within minutes instead of days. For some of the integrations, we can force the user to go through Okta, and in a few cases where we have sensitive data, we have extra security steps. For example, to access [BambooHR](https://www.bamboohr.com) users have to go through Okta first (and using Multi-Factor Authentication aka MFA) instead of direct access, and they have to perform yet one more MFA-style step of authentication just for BambooHR.\n\nAre there problems with this? Sure. 
Not everything integrates as well as [Greenhouse](https://www.greenhouse.io) or BambooHR, because each SaaS has implemented their own APIs and done their own SAML setup. Some don’t offer consistent interfaces to integrate with, which means that our team members can bypass Okta and go straight into the SaaS app in some cases, and in others they are forced to use Okta. This workflow inconsistency is sometimes frustrating for team members. We’re constantly [updating our team member instructions](/handbook/business-technology/okta/okta-enduser-faq/) on Okta usage and try to communicate it to all team members as best we can, but we are impacting some users’ workflows. For example, if you sign in via Okta, you need to keep that tab open in your browser, otherwise your Okta session will end and you’ll find yourself repeatedly “MFAing” until you’re blue in the face. Many people are not used to working that way, and not having all SaaS apps working exactly the same doesn’t help. But overall, the time savings and security are great gains for ZTN and we are quite happy with the implementation.\n\n### SSH access\nAs I write this, we are getting ready to start the [Okta ASA](https://www.okta.com/products/advanced-server-access) rollout to Staging to give it a good test. Like SaaS, we expect a few hiccups here and there – especially since this is a new product for Okta, [released earlier this year](https://www.okta.com/blog/2019/04/advanced-server-access-and-infrastructure-identity/). And talk about workflow changes – if you thought browser-based application users were picky, command line SSH users are a bizarre bunch indeed. Command line junkies practically have their own religion around workflow and we’re introducing a change to that workflow. Yes, it is a minor change, but it already concerns me. 
Truthfully, because I am one of those oddball Linux users who lives on the command line and I tend to get fairly picky after a couple decades of being able to adjust and customize every aspect of my experience.\n\n### Camo proxy\nThis will seem like a weird one, but mitigating a security issue actually helped us out from a ZTN perspective. There was a security issue reported via our [HackerOne program](/handbook/security/security-engineering/application-security/runbooks/hackerone-process.html) that allowed for malicious users to gather IP addresses from unsuspecting victims via embedded image files. The solution was to use Camo proxy to resolve the [issue](https://gitlab.com/gitlab-org/gitlab-foss/issues/55115). The Camo proxy was widely deployed to ensure all possible links were protected and had the side benefit of ensuring communications going through the proxy were encrypted. Encrypting communications was one of the items we wanted as a part of ZTN and, as it turned out, we’d already done it.\n\n### A sound foundation\nThere are two things we want from our servers and containers and databases. First, we want them buttoned down tight and properly secured. All of these systems have robust controls, and we can perform all kinds of monitoring, but we have to do it at scale. Tightening security controls is especially important if you are using some of the Zero Trust-ish solutions out there to regulate access to these systems. We’re talking about automation of access provisioning, so we want to make sure that minimal access levels required for data stored on systems *remains* minimal access. This means no escalation of privileges due to configuration mistakes or security vulnerabilities. We also want to make sure that all services being offered up by these systems are as secure as possible against compromise, either locally or remotely.\n\nSecond, we want complete visibility into our infrastructure. 
If something goes awry with a vulnerability being disclosed that potentially impacts our systems or a security incident happens, we want to be able to quickly assess the state of the environment, ensure patches are installed, receive alerts based upon custom triggers to help monitor everything, and so on.\n\nWe are using [Tenable](https://www.tenable.com/products/tenable-io) (mainly for assessments) and Uptycs (mainly for monitoring and alerting) in our environment to help with this visibility. Both certainly handle the basics just fine, in fact Tenable has been quite up to the task. We are facing a few challenges with [Uptycs](https://www.uptycs.com) as we’d like to do more than what the product currently offers. This may not sound like traditional ZTN territory, but it is. It does no good to offer up state-of-the-art authentication and authorization to resources that are poorly maintained and monitored. Like everything else in our company, we face issues with scale – our infrastructure needs to grow and managing the security of that infrastructure must also scale well. Right now we can manage the security of our environment just fine. In fact, it is quite strong, but a lot of it relies on manual intervention which has scaling issues. We have a lot of hash marks in the “win” column with Tenable, but as we scale and expand we’re challenged by Uptycs. In the spirit of openness, we’ll keep you posted on how this progresses.\n\n### The log ride\nTo get a grip on all of this activity, we need to be able to grab all the logs, toss them into one place, and make sense out of them. Our goal is two-fold: we need to understand how our system is being used so we can fine-tune it and we need to be able to detect anomalous events that could signify potential breaches. All of our systems put out logs, and we’ve designed systems to monitor those logs. 
It is nice to automate alerts so as odd events occur, we’re immediately notified, and in some cases, issues are automatically opened for further triage. We’ve started down this path with deployment of several technologies, related to the [Logging Working Group](/company/team/structure/working-groups/logging/). We’re in the initial first steps, and we expect that logs generated from the various ZTN implementations will help improve the logging efforts, perhaps even propel it along quicker as we work out the kinks.\n\n## The Budget Issue\nA big ZTN question we get involves budget. After all, one company’s solution may involve a couple of small purchases and a large effort of tweaking and reconfiguring existing technology that is already deployed. Another company might have to make some major investments in new products just to get started. In other words, how do you budget for a solution when you don’t know exactly what that solution will look like?\n\nThis is probably one of those things a lot of organizations do not discuss, at least in any detail outside of “it’s expensive”. The idea of ZTN as a concept is an easy sell to most organizations because the benefits are so great. At the lofty bullet-point level on vendor slides, they often seem completely undeniable. But when you break down a concept into digestible and deployable components, you are often into interesting budget territory. Getting a department to buy into the concept is much easier than getting a department to alter their budget and purchase the XYZ product, deploy it, maintain it, and oh yeah please give the security department all of the logs. Of course this is a slight exaggeration to convey a point, but it is more often on the mark than not. 
We simply couldn’t fully budget for most of this because we didn’t know what we were going to be deploying until we found a particular solution.\n\nIn this case we have to be able to show an [ROI](https://en.wikipedia.org/wiki/Return_on_investment), which means we need to help a department understand the benefits and actually show an improvement to that department’s bottom line. For example, Okta has allowed us to change some onboarding and offboarding processes from days into minutes – and it's a massive timesaver. The push for Okta ASA is because our Infrastructure department saw the gains realized from our Okta rollout, and asked for something similar. Regardless of which department’s budget this could go against, it has to be sold to someone internally. Showing an ROI that clearly states we could financially benefit in one or more areas is really the only way to go about it. Showing the benefits is critical when you are searching for solutions to problems with no idea which solution will work.\n\n## Advice\nSince a lot of people ask for advice on ZTN in general, I’d like to share some impressions from our experience. Here are some major things that really have helped us.\n\n### Break down your needs into simple components\nYou do this by defining the problem end-to-end. For us, we could break it down into user identification and authentication, device identification and authorization, data classification, and policy enforcement. Each part was further broken down into smaller pieces – which includes a lot of what we covered in previous blog posts. This deconstruction helped us understand all of the areas we needed to work with.\n\n### Look at areas of winning\nIf a deployed technology is already solving part of the problem, can it be expanded? If it can’t, why not? Where are the gaps? List those gaps and use them to identify possible solutions during the review. 
We covered this topic in detail in a previous blog post, [ZTN implementation challenges ](/blog/zero-trust-at-gitlab-implementation-challenges/).\n\n### Ignore the vendor “spin”\nThere are vendors that sell solutions where they claim to be solving ZTN. In my ancient past, I worked for a company that sold (among other things) system administration tools. One day our boss handed us a list of compliance guidelines for three different standards. We were to go through each bullet item for each standard, point out the system administrative tools and the various system checks in our products that lined up with each bullet item, and write them down. This process took a few days, and by the end of the week each compliance standard had a list of checks. The product team grouped these checks together, and just like that we were a compliance company. Now the product line was actually quite good and robust which made this fairly easily, but the pivot of the company to being compliance-focused took longer for that marketing team to print up flyers than it did for the tech part. Yes, we were incomplete – we weren’t asked to write additional checks, we were asked to just use existing checks. But we literally were ready in less than a week with something we could call compliance.\n\nMy point here is that I often get the feeling that ZTN vendors do the same thing. They looked over their existing product line, figured out what they could even remotely claim as being a part of a “Zero Trust” solution, and overnight became a ZTN solutions provider. Of course, if your own organization’s world view on what ZTN is lines up with a particular vendor, great! Buy it. But, for GitLab, we had to break down what we wanted the various components of our technology and data to do and align them with our own ideas of ZTN, refine our model, and then go find vendors that did extremely specific things. 
For example, we’ve approached Okta with the breakdowns we are trying to solve – and they have products that solve them. For the most part we’ve ignored the whole “ZTN packaged solutions” approach and went after the core of what their products do, and we’re solving our problems as a result.\n\n## Conclusion\nWe’re getting there. We have a lot of wins, and a number of interesting challenges. Every once in a while we will post a new blog to keep you current on our security saga with Zero Trust, and hopefully you can learn from our examples – including our challenges – and help make your systems, data, and users as secure as possible. We hope you’ll follow along and, if you’ve got a ZTN viewpoint to share, we invite you to comment below.\n\n*Special shout-out to the entire security team for their input on this blog series.*\n\nPhoto by [Puria Berenji](https://unsplash.com/@ipuriagram?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText). 
\n{: .note}\n\n",[9,720,2057],{"slug":7286,"featured":6,"template":680},"zero-trust-at-gitlab-where-do-we-go-from-here","content:en-us:blog:zero-trust-at-gitlab-where-do-we-go-from-here.yml","Zero Trust At Gitlab Where Do We Go From Here","en-us/blog/zero-trust-at-gitlab-where-do-we-go-from-here.yml","en-us/blog/zero-trust-at-gitlab-where-do-we-go-from-here",{"_path":7292,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7293,"content":7298,"config":7303,"_id":7305,"_type":14,"title":7306,"_source":16,"_file":7307,"_stem":7308,"_extension":19},"/en-us/blog/2019-gartner-aro-mq",{"title":7294,"description":7295,"ogTitle":7294,"ogDescription":7295,"noIndex":6,"ogImage":2720,"ogUrl":7296,"ogSiteName":667,"ogType":668,"canonicalUrls":7296,"schema":7297},"GitLab named Challenger in Gartner Magic Quadrant for Application Release Orchestration 2019","We're happy to share that GitLab is a Challenger in Gartner's 2019 ARO Magic Quadrant","https://about.gitlab.com/blog/2019-gartner-aro-mq","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named Challenger in Gartner Magic Quadrant for Application Release Orchestration 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2020-01-16\",\n      }",{"title":7294,"description":7295,"authors":7299,"heroImage":2720,"date":7300,"body":7301,"category":675,"tags":7302},[3074],"2020-01-16","\n\nWe are pleased to share that recently GitLab was named a Challenger in the Gartner 2019 Magic Quadrant for Application Release Orchestration. 
ARO is a relatively new area for GitLab, but we believe our placement as a Challenger compared to last year’s placement as a Niche Player reflects the work we’ve put in and rapid progress we’ve made.\n\nYou can visit our [ARO MQ commentary page](/analysts/gartner-aro19/) to read our thoughts on the ARO markets and this report along with the lessons we learn participating. We’ll be adding links to this page to our roadmap items that show our plans for continued improvement. \n\nGartner, Magic Quadrant for Application Release Orchestration, 7 October 2019, Daniel Betts, Chris Saunderson, Hassan Ennaciri, Christopher Little Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. Gartner disclaims all warranties, express or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose. \n{: .note}\n\nImage by \u003Ca href=\"https://pixabay.com/users/pisauikan-4552082/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">pisauikan\u003C/a> from \u003Ca href=\"https://pixabay.com/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">Pixabay\u003C/a>\n{: .note}\n",[1440,9,675,109],{"slug":7304,"featured":6,"template":680},"2019-gartner-aro-mq","content:en-us:blog:2019-gartner-aro-mq.yml","2019 Gartner Aro Mq","en-us/blog/2019-gartner-aro-mq.yml","en-us/blog/2019-gartner-aro-mq",38,[660,685,707,730,752,774,795,818,839],1751484543048]