# NOTE(review): reconstructed from a newline-stripped paste of rendered Helm output.
# NOTE(review): this manifest embeds TLS private keys (key.pem / clientkey.pem) in
# base64 — anyone with read access to this file holds the keys. Rotate them and
# source these Secrets from a secret store instead of committing them.
---
# Source: uma/templates/service_account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: uma
  namespace: dx-uma
---
# Source: uma/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
  namespace: dx-uma
  name: caaiops-uma-certs
data:
  ca-cert.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU9YSXhBTXArQ3JCNC91ZGIyb1ZTM1l3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSmJHOWpZV3hvYjNOME1CNFhEVEl6TURFd05USXhNamN3T0ZvWERUTXpNREV3TWpJeApNamN3T0Zvd0ZERVNNQkFHQTFVRUF4TUpiRzlqWVd4b2IzTjBNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUF5cUlDSHFYcnJSR2tGNEpKbUJ0MXZrWldKckJDRHAzNXdUbDdnVTEwNkM0M0Z5S0QKWTZWamY2MlFEaHZsMitBWkVPT2ZURnNESFFqSkQyWUJUM0s2Y1IxSHU1UEVhOUxtODVFVkluUEJBblZJUy9iMgpLYVc4UFM2ZlpndjBXaVp2S1RuUGlhVVBscjk5TjU0bWFMSHVOUS9rWlE1MEpjU0ppUGQyb00vbm8yQXo2aTVRClVWRDZYVURlRFN3elRwcERHd0ZxOXlTTDJMOG9lcEdyYkVUYXdCRVNqWTc1VmIzSXJqWEx6dURYTjNDTWowbjkKeXZuNXk0UklnZms2dmhpblFkNDYwMHFOSmFNWkdmK0dPenN6ZlZOcUFybHBHQ2U3RVRSOTJZVkU5enB5czM4eApxaTI1SmdVcDBVZjB1eHQwaUU4QlpyWjFEWElKUVJFRVpDQ1ZDUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGTVdTcmZZY1ByaWM5cE1jbjlUdFJnVlJzMXFCTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQkw3T2ZnZFZrYUZKN0xESm1lL08vRE51akE1YjYvYVhlV3YxWHhuTG9ZMERUaEpmWldONW5ICmdkM2NLMFIxOFpCSUhPWmF3UnZUTjVGN3FGUnQrWHFWSlY3NFM1SGZycU9mRXBhajVZdXdQdUNBSGg3OHora2YKc2FGWndoRFpXMUY2WDNaZ3RITUExYXVOaFM5YWtmRXlmNnF0cVk2cS9vNDNSamYxNmRTMWczdU1XZ1U0OUk5ZgpPUDlXUVFKSzIvd0lkcnZlaWlmRnozMVRkOTdzNzc0UWFjbzZJSDRrTHJwc0wrMmpSbDJRWUNIT1ZzOU8vZktkClVndVdqTVVVRTRId1Fzb3dkNHZUK1lLTDhvWUJZTUd5ZTEwUS8vVTcyNktFem5wa0NZZ0NNVDd0UitPREN5VksKclhhOWhCb2pRNkV2aHJ5N3M3TnY0SjdXQ2FCRGh2Vm0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  cert.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lSQU9TZms1dGNHZTFFK0dCTEs2ekFweWd3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSmJHOWpZV3hvYjNOME1CNFhEVEl6TURFd05USXhNamN3T1ZvWERUTXpNREV3TWpJeApNamN3T1Zvd0ZERVNNQkFHQTFVRUF4TUpiRzlqWVd4b2IzTjBNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUE1R3JHQld5bUtIdkdsWjlXM0tnMk5HVmlDYWZQN3pSRit0elhTU3Q5ci95QVFNTDUKbG1uL2NlNzN3V1FXVkg0d2ZGbnJaemZVUVJGNFhBWTh1UStTaUxnTmpUbGR0bnhLcE5pT0RFV25sbk1zemZZdwp5N2ZEM25vNXZmV0lZUFNIRGhQSW0rOWtGODR5SExwc01rN1h3MUhoTzFXQmw4ZmdQYzNHWTRoQUxkSWdNL1NnCk5nYXUwanE4bExnUVp3eVlpYmhmb2FjaVVjcXBvcThwV0hGNFNGK1ZWK0h5Z3B0SWJzQm04a2xDYVB6d0ZBaTIKUWdEcVZHUTlXTHdIWHJLbHB1YzlJd2Q5bVFuWitFZlJPWHlIdlBpdGVvU0dHRUJuQUdqTWFpcWFpN0U5YlJxdQpBTHE4a0tKRlI5d0lzSHNzTWpHQmVJSitqY0tGTWNYNlVZaUNvUUlEQVFBQm96OHdQVEFPQmdOVkhROEJBZjhFCkJBTUNCYUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01Bd0dBMVVkRXdFQi93UUMKTUFBd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEbDRRd3QyaVl6U2pscnZSMjJsUjl0WlNZQUIrdUliNzRtawpFWXF0K01lQ2RXM3Mrcjh1NTkxRER4QUxDSEphTlMyc1pLQlFyYkJXdCtRR2RWSlMxcmlzTG5odk1jY053YnhwCldiTTNuM2FGVGNFZXA1ZXpjdlU3QmpkUFpKNWVKbFdJZUg5Z0ZhdWFka0QvWU4vdXBYY2xxd3p0bTE1RGl2SzgKZC9OSE9TdzB2RkdJL0FaZTNrUGRISmlVbDBzdzR3TFJyK1lpODZVK2M0RTNVUDBwL3V1amcwK1YybzNjaWdtbwo0UXZhZTBudWlEdmFud0N6UFhoRjgyWjhHVXVkSVBXY1F6aktuVUlBSFA0ZlRwRkwrVFVIR0ZMV2grYW5MVGptCnFSUXlaSGtVNGdMWElQS0F2eHl3bmpEOERjdGpCMklTaDAzem14VVhwajNDTkpVamR1dz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  clientcert.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lSQUtSVDhlcVVRVkIrSVJCSHBiZGE2dGt3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSmJHOWpZV3hvYjNOME1CNFhEVEl6TURFd05USXhNamN3T1ZvWERUTXpNREV3TWpJeApNamN3T1Zvd0ZERVNNQkFHQTFVRUF4TUpiRzlqWVd4b2IzTjBNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUFyb3ZyaXR1R3FjUzgyNDRIbTB4aDVITGViMFc2WEdYanBBZkdWT0NuLzkzaktYRGsKTk5HTTZnTnYxajg5Y2k2ME5BVFBudFh1SXIzeU5MTjBCVkEydTlGQWpJMzRTdVdlUnVxeEV0MVVnSTBWS2Z4YwpGSHpBR3k1ZTJyRlJqN05SU3dDZU1mZkJieWxPYWp0REc0ZGxYTWNhTGs1Y2lYMXZFaTN2QzNacCtQcHM4UXVtCklqdXR6aVNiMGttNm5MUlgySUtBZ0UrZjJ3VnlHQS80Wm1Vc0Z1OGRBc1BUeHBqdTBwZFArUnRXY2pUV2pkckoKdkFoOXc2V1NGMHFZSGxod054dmNHRGlxZ3JRdFBhbEkxNVoyL1hlcUo0Tk8zRjgwYTQ0eU9sWVdOKzdBSkNlYgpjSlZRUnUzdkVWaHNVQm1qL0FtMS81UVc2T1kzYkZOOUxhU1JKd0lEQVFBQm96OHdQVEFPQmdOVkhROEJBZjhFCkJBTUNCYUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01Bd0dBMVVkRXdFQi93UUMKTUFBd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFNVkw3YU5xWWVMamMvcmZiVENkZzJKdlZHN3RrOHRDNCttUQpDVGN6clhJK2pJbWY1SFpPQmpuOWV0aVRhK1pGbjB3ZEhyNkNxTnNPTEdJQk5BNGtkRklFYVJ4VnM1V1hmUGpzCitxQ1QxZ01vOTFta0N3SmJIcW5JMVpzbU1kcDB3dThQeUUydWk4dVNIdVpvdERxU3oyM0RwSzNWcHhxSDFGNjQKTk1vdWZzL2tFVkMvTWRXaHFKaGRGU0c4TEFteEg4ellGZ0ZldW9VNmxpd01SREVHRExPUUVLTlNEWVhjeXU2MApudUZpZ2d2MTlhVGt5ODg5REpPbUlNVFpnT0lLRUEwUWdUUldwN1RKL0Zqd1lRVGVnMXFNc1ZXc1V2Y1orZ3h3CnJBaklDeGczZjhTU1k3N2Z6OUs3d01XcktCRGd0RW8zZE5GdmFLWi9zQ3VLT3BFa3FLWT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBNUdyR0JXeW1LSHZHbFo5VzNLZzJOR1ZpQ2FmUDd6UkYrdHpYU1N0OXIveUFRTUw1Cmxtbi9jZTczd1dRV1ZINHdmRm5yWnpmVVFSRjRYQVk4dVErU2lMZ05qVGxkdG54S3BOaU9ERVdubG5Nc3pmWXcKeTdmRDNubzV2ZldJWVBTSERoUEltKzlrRjg0eUhMcHNNazdYdzFIaE8xV0JsOGZnUGMzR1k0aEFMZElnTS9TZwpOZ2F1MGpxOGxMZ1Fad3lZaWJoZm9hY2lVY3Fwb3E4cFdIRjRTRitWVitIeWdwdElic0JtOGtsQ2FQendGQWkyClFnRHFWR1E5V0x3SFhyS2xwdWM5SXdkOW1RblorRWZST1h5SHZQaXRlb1NHR0VCbkFHak1haXFhaTdFOWJScXUKQUxxOGtLSkZSOXdJc0hzc01qR0JlSUoramNLRk1jWDZVWWlDb1FJREFRQUJBb0lCQURuM3Z2bHhjVm1reitqNAozeHB3dGhEbUNkTlplelBCbUdEcDJ0ais2STdaRGROaHArVzhVTStoVGM4T2ozd3g2aXdreGRFVUloSEJWVUpGCmN2T2tPQlBxWWNZTllQTUc2QXFkeXE3aW5uMVlEVzdYbzlxUGNHM3RwdW5HZmkzYzRNbDV5OWVIRVdmWk5HNkcKQVI5alV1Nkd3UnBGd1BXOEMvU2pYRTIyTGxOeUhVcFVOeTVXd3lFQWNlYUpSYlM2bDNwWlB3NmZvUXVqazlKTQowOGxQZG5Ma3dPNU83U0JYOFJ1V0dHTUlMZkdVeFlMRkZ2MWJwUGFibzFjN0w1bEoyTFRqZGRqdXFoNFZhbnhXCi9BOWpqMDJlZXRJWjhIWDdaUFE1eWdXMDhZRVBjUDhpTDhMdDBMZnF6V0pYY0Q5aEFLc21QUkd5aVZ2WnpUNzcKN1EvQVdvRUNnWUVBNXJITG9NazNvTU9YZjlNc0tjOENlUGJ3TVRoWER3NG1XTEZ5anBmaDdOUE1JcURxZEs5agp3WHF6VWVzOTh3RysvVFkvNkYxQWFSQ1QzdjNSdDVNWnBISUN4N1lZSzY2NUVqaEpVK29seVc2c21mM1NGdTA2CjRrTHdRTk1JK3RNdkF1a2NJLzFTaTc0aHk0elJUZUxKWmtBNy93dG5DenRZYTEweHMyemRGSTBDZ1lFQS9Ya0cKWDQ0V1ZBQ0xqTzd2VkZiRjExYnRPVC9XRFcwdzBxTW9rQkZwUU14eUpIbmt1TkdKd244MUNrd2RaV3JrNzJ6egpvT2xFZUp4VzdTWWMwTVhwUWozd3V1NEpBaXhTM1dVWTFaK2hJMEpqR0liWUV4QTkzcUtsaEZWRStlTVphSGVoCnRpUHp4VnY5bjVQUGRoeWEvZ2dFWjlSc3JGdlk2QkpBbXZCSXcyVUNnWUVBay9TVDFKczR3akt2aDV1NFIxZ3QKRGNmU1pMN3Rsd3BNTGJZZzhyTVdndjJUZnVJVmZLREFLZWEzKzJJVWFUWkZkOEREWEJQeXEyK21adTREQXVtbApHSUxHcGE5ejhSQXVRakUzVmtwbDBiclptQ25pQkRKQ0FidE11MGFlQzhCRjltZnJWR216OTB3S0FMSmZ5c1FZCnh5dUxUeGZSRHJNUkVrckRCWUJjaWtFQ2dZRUFzbHNOckFGTUh0Ry9ZR3praFNFRjEyKzRQdmQzUkQxWUJHK0EKWkZlTUMyZVF6YS9qekZUTkxjbmE1YU1IY0V5V1NROE9JMlRncEVuR05Oc0RuYmdWV1dKQ1dSYVR5aTZzSkZNMQo5V044dkZYRGF3ZS9WVEt4aGs1R1QyYTh3V1VXaE5iaE9FdjJCU0treFR2NTJHOXFkR1BwemJIQXdrdGl4bkhKCmVmeW9CcTBDZ1lBTlpvWEdNb2xjcFFzbXRKVXNnL3VVSndrVFJFdDNiRkJiU0VjWndpb01oZFRkUEJlV3VNbXUKVjkrNUYxNGlPeDE3WkNMZmJXN2p1RXNaNG5NS3FXQnpxUVBieUdRRzBnaTNvQ1E2Zzd1SGlkUUxrQUdVVnk1Zwo4RkxLUGlCbkkwL2hNakd0enE3azRuY0owU0ovdU1PaGxNaWxrcnl0NWc2cTBQMnNsNHNhZmc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
  clientkey.pem: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQ3VpK3VLMjRhcHhMemIKamdlYlRHSGtjdDV2UmJwY1plT2tCOFpVNEtmLzNlTXBjT1EwMFl6cUEyL1dQejF5THJRMEJNK2UxZTRpdmZJMApzM1FGVURhNzBVQ01qZmhLNVo1RzZyRVMzVlNBalJVcC9Gd1VmTUFiTGw3YXNWR1BzMUZMQUo0eDk4RnZLVTVxCk8wTWJoMlZjeHhvdVRseUpmVzhTTGU4TGRtbjQrbXp4QzZZaU82M09KSnZTU2JxY3RGZllnb0NBVDUvYkJYSVkKRC9obVpTd1c3eDBDdzlQR21PN1NsMC81RzFaeU5OYU4yc204Q0gzRHBaSVhTcGdlV0hBM0c5d1lPS3FDdEMwOQpxVWpYbG5iOWQ2b25nMDdjWHpScmpqSTZWaFkzN3NBa0o1dHdsVkJHN2U4UldHeFFHYVA4Q2JYL2xCYm81amRzClUzMHRwSkVuQWdNQkFBRUNnZ0VCQUp4aDlpWFJxMlFkUlV1OGc2b3JGQ1hXbU4yKzdrcHNhemtDVWpxbmtUMVkKTW5kbGtzQ1Q0cG1qZktiS3dDVzJISFBpdjVYQ2Uvb2FqbytUSEdiS1NFK2NHaVlUNkpwS0FiWHQvYm8ySE1pVQpIcHNnWSt0akhjYjhrN1dDV3QzUnZncmh3ZHBvN0tESXRGM1BQTC9FWGhkeXQwMDNuZ2I5SCs5OHdnaFdEbm9uCmdWbkF0eXZUTTdZdnI0MVZoN2xBSXNHVlhWVkZMdTBTZ1MyOWl0eTV4NTdzSEY3NGJKTGdXeUo2Z0M0ZDlRYi8KNU9lUXRSYmNJSGRjdHBleFk2d25DdTB1T3hrc2g0TjRuTnhNclZ0WXBGakRtVW03clQzZ25HcGhvRUp0NUFnOAp2YisxSjROVGIxK1N6SzNVWll4V0U3aTB2YXZSWkJSSkplOEZra2xBcmtFQ2dZRUEyVE16cUZEY2F3SWNCckt4ClE0QlNDSlZBZ0pBS09RSjJXNm03WXBhM2F5SGRtUFpFNXNqemxieDcrcU5JSXk2RGVOcmUraElJWWJvc3NqZzEKU0RiSXNxbWZRSVhVSWg0bjBweDNlb2xKL29tVWpNR3Bva2J6dzdyMjhWZEoyNExWMFMvMHJCQnF6aXY0bmdCYQpXTWVMckRxdHVJZ1hFOXZyeTZDZ3prZVFtQWNDZ1lFQXpib2crTnU1cVRqcmtZeFlGampNVHJBOTJYa0l6N2pjClVWWWlXMGhuNG9pK1hmRm1Zc2tBSGRxQkZPdGh4S21yejlvd2c5NkJkanQrRWxwV2E5VG9IN2dZWnlzaWNtU0oKK3hVN0dBMnE2VnZ3UVYyVWFKb004Q3hXYTBUckJiaVdGODc3dXFZNjFDREQwRFQ0VXU2TUY4RG5aV2laV09wMworTmVKaVpyZXRlRUNnWUJDb1pPQ1dqUVVXOVJQTmJodHRjcjB1UnpOaytLZTlLTnNvVmhMK1dLUmFoVTZIS3FzCmhnODc2MU81WXlmNUt3YklKNVRwOEdoRDJzSmxkaHJhYVh3T1d6endwVjJiamRhM3BLcCttK2xvUGJxbHA2K3IKaWNxMFRBSC9yY0NKcDh5Q1hRelpVRHdEdmJlS2VSOCtSeEVSSi9zdE5ncW1XL3d0UHJ5Mkszd3ZOd0tCZ1FDTgpRelpxeXkya1JpSnpVY1FXbGkrc0JKYWpvdGxyRVF1dmlnNlRVWmVMMU11cTVCeWl6bFFYSzdlZzdyTzJmNjhyClRTNHREbmNsQVZyakxVYlVTbUNOanpLcWZOTHRhNTNYVzgrMUI0V0pNWWs2eVJBajkwdlZDd3pZSVgyajh3aVYKRnBxQm9xMFloZHNQTjZBZ0NnOEdMSDUzZ3FoeGhxYVVweU9ZNmsvczRRS0JnQVN3S2NNRVRVNTVCQ1JoVmwrMAp4K0JlS2dqZTFUa2tLWVVyQ2RiZjlLZUZ3SjJqT1FtRlJQSklvdGozUlYyczhMeDNYdWpZQUhSVS9MREhxVTVNCjErdFY0L2hMTElZRW52a3BjTDlRR3lYT3FDNWszSkxyNENMYlN0ZC92a2pOcE1PTVZkUTdKcGx4azFaZzBITHYKZnZES0dYVlRvK1FmVHlCODdRZUczckorCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
---
# Source: uma/templates/webhook_probeautoattach.yaml
apiVersion: v1
kind: Secret
metadata:
  namespace: dx-uma
  name: caaiops-probe-autoattach-certs
data:
  ca-cert.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURNVENDQWhtZ0F3SUJBZ0lRV0NoSEFWcTgvWStwNE1tNGR4Z1FrekFOQmdrcWhraUc5dzBCQVFzRkFEQWoKTVNFd0h3WURWUVFERXhoaGNHMHRjSEp2WW1VdFlYVjBiMkYwZEdGamFDMXpkbU13SGhjTk1qUXhNREl4TVRJdwpNelF5V2hjTk16UXhNREU1TVRJd016UXlXakFqTVNFd0h3WURWUVFERXhoaGNHMHRjSEp2WW1VdFlYVjBiMkYwCmRHRmphQzF6ZG1Nd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURhZGI1ODlyODIKSzNzYjg5bHl2SGxaQjA2MWt1UzlkazFDUUV6L3B1aExvVmRvR1duTWR4NlpzcW1udzNJdnJZVDFaVHBHZlgxMQpmM1NyWmZJS3Z0NWhZY2thdXBKazFuaHg1b3RPUnNOWEtVQ2dFcmtURGc3TkpWN0E0N3BIWXIrMGZtWDRocnZtCkk5ai9uWnQ2Y09CMkl1ejE1YytjUG03NzV6S3E3NnB5Y3pQVkU4eXBuQ3N5aTlsM09PZUI1MVlNVkVFWHEyaW8KZW5oUnlsYkpreWRBRXhobTViOVpITk1MWmFkVmI0ZjJQLzc1V0FnVksrTDdWZHBWc09iSk50dFZyTHlFZ0w1VwpWTTlkS0ZIcjJ3WmgyZWhCVzYxUkxxd01hZXhrZDNYd1VoeW1ReVBGcnFEVTkwUXBtQU1saGIyMzI0VDgzcHY4CkZRbEV4VGxNVUNGdkFnTUJBQUdqWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDcERBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVWS9qdApHSXR5WUJjbi93NU1qT2dOZjBQWFdKTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQmxJdHBYZUtwWE1qcEhUCnh4a0NxQ2QxcnhUNC9lNUhhaCt4UnVCeVlXaGRYanllZW0zZGRxNWZ6OWNpQ0todmtkNDFyQW1nVGRLVHpzOXgKR0JYTS9sRG1YTmc2UThPV2FYd1Y2a283aHRQOVFrVG5PdUREcERGS21GUjEwUFpvaEFFeFdvV1JucW13M3NKKwpBYzNOQUt0am1PdFowZEVtSmp5U2dqaXV3d2MzaXhSV0xaTlpyek5CYWJ2blR1blBLbVpxb0lrTU5IbW5QcXViCkhWRWZtUzZCdSt0WGh3MTl1UmxSREQ3VnhjTW5kRnVLNjBzS2s5bEJLYVAwWU82S2ZNRUFzUkdkUW4rMnZlcGQKVXNoL2U4RmRtY0t6RHhTb0owTEVXUW1TZnBXZ2JXdjVOdVo5L05Ha2tEV2xMdFcvcktKT1ZLNGhiVmFML3BJRApMSEMyZjZvPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  cert.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURZakNDQWtxZ0F3SUJBZ0lRUHRiYzlLTkNDRUFIY1RYNzNhUUw3REFOQmdrcWhraUc5dzBCQVFzRkFEQWoKTVNFd0h3WURWUVFERXhoaGNHMHRjSEp2WW1VdFlYVjBiMkYwZEdGamFDMXpkbU13SGhjTk1qUXhNREl4TVRJdwpNelF5V2hjTk16UXhNREU1TVRJd016UXlXakFqTVNFd0h3WURWUVFERXhoaGNHMHRjSEp2WW1VdFlYVjBiMkYwCmRHRmphQzF6ZG1Nd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURDcFMrYWlOY3UKQVEvOG5uaVpYQllFK01xbkI2djhUcmVIUExMeGVuMHF1YnJwN29YblJKWkNRdzYzWFhpV0RTRXY4YXI1RlFnSAovbElmSG5mdGxlTXVzQWtmOG1qcW9mVDB1eDJzajk2ZktaVDAxc1lFakFMOGt1UVpZbTBaQlk3ZU5tS2Z4UEwwCmlDU29OOHRvdTRmdm1mTEFQV0RjV2FVSUFqR3RwUkpPT1BHSzhzMCtnS3B3Z0VFWWcwdm9ZS3ZMZ1NzeGFoZzgKd1lUb0g3azdUTE1Jdi80UTdad0pTUEg0NUVDVWFGV2ZXRVd3RUk2ZWxaNVBpT1E3NVZWb0ZIODRBdURaMkdtNgppOWpDa0ZJaWNuK3lGSUhOeFRUbW9zZExxRjN6bmRNdi9XNEVGVkRTWEdtNWc5aTZFRC9OL0g5dE94T2lqTWlwClNhL0ZsdXZFZ2JLVEFnTUJBQUdqZ1pFd2dZNHdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEdBMVVkSlFRV01CUUcKQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNRThHQTFVZEVRUklNRWFDSDJGdwpiUzF3Y205aVpTMWhkWFJ2WVhSMFlXTm9MWE4yWXk1a2VDMTFiV0dDSTJGd2JTMXdjbTlpWlMxaGRYUnZZWFIwCllXTm9MWE4yWXk1a2VDMTFiV0V1YzNaak1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQWhPUlVWNXFTZFUzZDIKWG9OcjJaRk5IMzVSWmpaZzdJUUExOGhOUXE4WmZVclFiTThPK2ZXQTdXL3NSODY4QnVOem1MT0tKWEYrcWVyMgpYbVluRWtpQWw2bEd6bEZMQXRyZ2FwemFycUJydFJTVmFmc0VIS1kwRzEvNjFlUTUzNzJWaEZuV2dqQXd4aWduCm1Qd2c1OEQ1MWMrOEFiNEovY2haM09VcWlsU2FucnBxNW04dlhRV2VMa2lkMGp4OU54NWowcXh2Ym80QnZsM0YKR0dVK1lWR0IvWXRSUW01UzFGWkNrOS81eXJnR1ZiemhzNXkzemhaRGtXSXJTS1QwZ2ZReDhMazQwN0FMM2hkWgpHZ1NrWHZIendSL3MrQTZKN1JpUmM4aWQ0OUtWR2M3bHRlbHRNSGMvekQ0WkF3c2JacGIxRHJwRW43Vk9rd3pFCmk3RmFMS2dZCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBd3FVdm1valhMZ0VQL0o1NG1Wd1dCUGpLcHdlci9FNjNoenl5OFhwOUtybTY2ZTZGCjUwU1dRa01PdDExNGxnMGhML0dxK1JVSUIvNVNIeDUzN1pYakxyQUpIL0pvNnFIMDlMc2RySS9lbnltVTlOYkcKQkl3Qy9KTGtHV0p0R1FXTzNqWmluOFR5OUlna3FEZkxhTHVINzVueXdEMWczRm1sQ0FJeHJhVVNUamp4aXZMTgpQb0NxY0lCQkdJTkw2R0NyeTRFck1Xb1lQTUdFNkIrNU8weXpDTC8rRU8yY0NVangrT1JBbEdoVm4xaEZzQkNPCm5wV2VUNGprTytWVmFCUi9PQUxnMmRocHVvdll3cEJTSW5KL3NoU0J6Y1UwNXFMSFM2aGQ4NTNUTC8xdUJCVlEKMGx4cHVZUFl1aEEvemZ4L2JUc1Rvb3pJcVVtdnhaYnJ4SUd5a3dJREFRQUJBb0lCQUJvUzNLU2J6ZzlZanhhcwo5OFM3WkN1NFZCVTUzdDBHZ21rNFJWUTVyMGlvNXBQdG9yeFBJTDJ4aytnV09sQUhaRG15NnM5QTlQbDFKUFVNCk5NNTd0Sm9OZEx0MTBuNFR3ZWVDcFQvM2FiL3lad1RCUEhkaGRnajFlRjY2STVrbGVIVGwzaUkrMUxqUy8rNFUKdzBlOVQwRldZd0ZEZDVVczRRR0lvYlQ4RnM2cGIvMkxPR21vdVo0anFvMVNtUWJDOHUwTE04NmY2RnlkcjFJcwpYMC8vZTM4dTVueFJCT01Rcm9HcGFqMWsrbXkvQ3lHQUVuSlYybHA2NVNnSUdmODY5Q0F0aTRRKzlPdTNicFRSCldZbEN6b1huU1JPdURPdGhuamtOTk9nTDI4N3MvTjVnbEIrOURNR1ZnZTlCcGFzYVVhV3RNWitJVmgrKzhhVlUKeUVNVlZWRUNnWUVBOXZON0hpQUIxS0IvM1ZwMEhFR0ZPWHFRRVJpbGxnNjdKc2crWnlMc0R5b1ZIRzEralgrdQpaQmRxbmFCdlBab3NUVi91RkFBaU0rU3hiYlRKQUI1cHU1cHJlNlpBeVdudmQyYjROekJyRGhyYWt6aXpsZzUyCkV3TzFuaTZmVjJDcGQzNUJUcDJVWjhUZjhHYmdBNkNBK0Z6WXNLYjJ1ang0dXRGTkxocElBQXNDZ1lFQXljY04KSWdQV3Q0VEp6MmMrZ09Ib2JwenVLVHh3Y0c5WDBKVm42VStPSGJaTVBIQVl2RnhiOVlsTHpKSVRLeWhUWHBBaQoxSmlyRjVlQ0h1NkpFbUpIcXBlUWxLYlpsUmo5MDNTZHFpS0RVdVNuS3ZJTzFTL2ZySG5GWUdoRlFxbGQ4UjRQCjQ4RGMvU0FyVUZGemM4UE9KWVd6bm5wb0JaMiszYTBZUkhMY2hKa0NnWUVBNW4vZkZBQkdGU1AyRlRNZm0xd2cKc01UUWdkU2lnZ3lmN1JRTkF4RmFpdEQ1d1g0R0dLYnJyS3NhcmFRWkVNTzl4RDdkSnJmMU5NQ3FSNzd0dWZtcApNUjF5WTRpTGdqUXAreVlXV3RVTHA2K3pBcVZ6SW9XKytnS0ZXKzhpZFZsWTlKclk5RHNyVVN3MjVkbndFQktaCkxyamkzUWdLMVV3cVdCMlNUNHppVlg4Q2dZRUF5YnpPRUpndDhVWStUN00xOXIzbkdkQ2svVzJFTGFYazhwS3UKVmNKSW9NUkc3SlpXNU55aXhKY0V4ZisyVW92NjdxekZaakZOdmtTb3dDeFZ0NSt6YjRnaDhiM3dCUFg3c1pNQwpuN3ZwK05MTG9GQnJMQksxVzBoVVl0TmRqQXl4OUFUT2RqTzV2d3FPdzdycjUyNTVpV0puOTZjQ29iclp2ZUQyClJsbGd5T0VDZ1lCaWtwMnM0S1VpNzlucmx4ZEkrcnhwOTVrSCtRK3dZRUZoRFdLemRDa25FT1AwN1JjRDM2SUIKT3FaQzRSUkx3cmMzR2tZNVZPenBtdnE1cldaZUNBRFo2cXlpa24yelcxOVpYTEEvRjZUbEdqTU4rZVRlV0wwUApvaWV1VnpuRWFMOTlKOHYyUjc1VnJhc0gzNitEQnlWMVVNcmpZbDZ2L2pGQm8yOHZqRmovRHc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
---
# Source: uma/templates/configmap_general.yaml
# Common config map needed for All extensions
apiVersion: v1
kind: ConfigMap
metadata:
  name: caaiops-config-common
  namespace: dx-uma
data:
  clusterName: "SandBox"
  agentManager.url: "localhost:5001"
  agentManager.credential: "credential"
  agentManager.tenantID: ""
  namespaces: ""
  agentManager.version: ""
  agentManager.httpProxy.host: ""
  agentManager.httpProxy.port: ""
  agentManager.httpProxy.username: ""
  agentManager.httpProxy.password: ""
  monitor.httpCollector.host: ""
  monitor.httpCollector.port: ""
  monitor.events.enabled: "false"
  microservices_agent_enabled: "false"
  microservices_agent_enable_kernel_version_check: "true"
  microservices_agent_enable_agentPerApplication: "true"
  microservices_agent_filterType: "whitelist"
  microservices_agent_tracer_enableHttpTracer: "true"
  microservices_agent_tracer_enableHttpsTracer: "false"
  microservices_agent_tracer_enableMySQLTracer: "true"
  microservices_agent_tracer_enableAppFlow: "true"
  cluster_events_monitoring_enabled: "false"
  cluster_events_metricsEnabled: "true"
  monitor.events.filter.fieldselector: "type!=Normal"
  monitor.events.filter.namespace: ""
  monitor.events.elasticDocTypeId: "itoa_events_apm"
  monitor.events.elasticDocTypeVersion: "1"
  monitor.events.elasticProduct: "ao"
  monitor.events.clamp.perDayLimit: "15000"
  monitor.clusterPerformance.node.noscheduleTaint.skipMetricAggregation.enabled: "true"
  monitor.clusterPerformance.node.noscheduleTaint.allowed.tolerations: ""
  monitor.container.prometheus.exporter.enabled: "true"
  monitor.container.prometheus.backend.enabled: "true"
  monitor.container.prometheus.backend.endPoint.url: "http://:"
  monitor.container.prometheus.backend.endPoint.username: ""
  monitor.container.prometheus.backend.endPoint.password: ""
  monitor.container.prometheus.backend.endPoint.token: ""
  # Property to enable strict hostnameVerifier for the Prometheus backend secure connection.
  monitor.container.prometheus.backend.endPoint.hostNameVerifierEnabled: "true"
  # Prometheus properties
  monitor.container.prometheus.backend.endPoint.configFiles: ""
  monitor.container.prometheus.backend.endPoint.metricAlias: "container_name=container,pod_name=pod"
  monitor.container.prometheus.backend.filter.name: ""
  monitor.container.prometheus.backend.filter.value: ""
  monitor.container.prometheus.backend.custom.promqlConfigMap: "custom-promql-config"
  monitor.container.prometheus.limits.cpu: "2"
  monitor.container.prometheus.limits.memory: "1024Mi"
  monitor.container.prometheus.requests.cpu: "200m"
  monitor.container.prometheus.requests.memory: "300Mi"
  agentNaming.deployment.apmia.process: "ClusterDeployment"
  agentNaming.deployment.apmia.agent: "Infrastructure Agent"
  agentNaming.deployment.prometheus.process: "ClusterPerformanceMonitor"
  agentNaming.deployment.prometheus.agent: "Prometheus Agent"
  agentNaming.daemonset.apmia.host: ""
  agentNaming.daemonset.apmia.agent: "Kubernetes Agent"
  monitor.clusterPerformance.enabled: "true"
  monitor.clusterPerformance.limits.cpu: "2"
  monitor.clusterPerformance.limits.memory: "3000Mi"
  monitor.clusterPerformance.requests.cpu: "200m"
  monitor.clusterPerformance.requests.memory: "800Mi"
  # Property to set Kubernetes/OpenShift vertices TTL value in minutes.
  monitor.clusterPerformance.dataReporter.vertex.ttl: "1440"
  # Property to set Kubernetes/OpenShift vertices refresh interval in minutes. Refresh interval is generally double the value of TTL.
  monitor.clusterPerformance.dataReporter.vertex.refreshInterval: "480"
  monitor.clusterPerformance.dataReporter.limits.cpu: "2"
  monitor.clusterPerformance.dataReporter.limits.memory: "2000Mi"
  monitor.clusterPerformance.dataReporter.requests.cpu: "200m"
  monitor.clusterPerformance.dataReporter.requests.memory: "300Mi"
  # NOTE(review): the rendered output declared monitor.application.opentracing.grpc.hostport
  # and monitor.application.zipkinTracing.zipkinServer.hostport twice (both times "").
  # Duplicate mapping keys are invalid YAML; the later occurrences (in the application
  # monitoring section below) are kept, matching last-wins parser behavior.
  agentNaming_dataReporter_nassClientEnabled: "false"
  opentracing_grpc_hostport: ""
  zipkinTracing_zipkinServer_hostport: ""
  cloudmonitoring.aws.enabled: "false"
  cloudmonitoring.aws.accessKey: ""
  cloudmonitoring.aws.secretKey: ""
  cloudmonitoring.aws.servicesList: ""
  cloudmonitoring.aws.limits.cpu: "2"
  cloudmonitoring.aws.limits.memory: "1024Mi"
  cloudmonitoring.aws.requests.cpu: "200m"
  cloudmonitoring.aws.requests.memory: "300Mi"
  agentNaming.deployment.aws.host: ""
  agentNaming.deployment.aws.process: ""
  agentNaming.deployment.aws.agent: ""
  agentNaming.deployment.httpCollector.host: ""
  agentNaming.deployment.httpCollector.process: ""
  openshift311Support: "true"
  architecture: "amd64"
  imageName: "oerth-scx.lvn.broadcom.net/docker-slim-ubi8:24.10.1.10"
  role: "common"
  EnableResourceLimits: "true"
  type: "Openshift"
  prefix: "k8s"
  # autoattach Properties
  monitor.application.autoattach.filterType: "whitelist"
  monitor.application.autoattach.customJdkTools.enabled: "false"
  monitor.application.autoattach.customJdkTools.imageName: "custom-jdk-tools:latest"
  monitor.application.autoattach.probe.enabled: "true"
  monitor.application.autoattach.probe.tlsEnabled: "true"
  monitor.application.autoattach.probe.webhookEnabled: "false"
  monitor.application.autoattach.probe.manageWebhookCaBundle: "false"
  monitor.application.autoattach.dynamicPropertyResolution.hostName: "{k8s_deployment_name},{k8s_daemonset_name},{k8s_deploymentconfig_name},{k8s_pod_name},ContainerHost"
  monitor.application.autoattach.dynamicPropertyResolution.agentName: ""
  # Java Auto Attach Properties
  monitor.application.autoattach.java.enabled: "true"
  monitor.application.autoattach.java.propertiesOverride: ""
  monitor.application.autoattach.java.customJdkTools.enabled: "false"
  monitor.application.autoattach.java.customJdkTools.imageName: ""
  # Dotnet Auto Attach Properties
  monitor.application.autoattach.dotnet.enabled: "true"
  monitor.application.autoattach.dotnet.propertiesOverride: ""
  # Nginx Auto Attach Properties
  monitor.application.autoattach.nginx.enabled: "true"
  # Php Auto Attach Properties
  monitor.application.autoattach.php.enabled: "true"
  monitor.application.jmx.enabled: "true"
  monitor.application.opentracing.enabled: "true"
  monitor.application.opentracing.grpc.hostport: ""
  monitor.application.opentracing.correlation.enabled: "false"
  monitor.application.zipkinTracing.enabled: "true"
  monitor.application.zipkinTracing.zipkinServer.hostport: ""
  monitor.httpCollector.enabled: "true"
  monitor.httpCollector.replicas: "1"
  monitor.httpCollector.server.host: "0.0.0.0"
  monitor.httpCollector.server.port: "8085"
  monitor.httpCollector.server.limits.cpu: "2"
  monitor.httpCollector.server.limits.memory: "3000Mi"
  monitor.httpCollector.server.requests.cpu: "2"
  monitor.httpCollector.server.requests.memory: "500Mi"
  monitor.httpCollector.memoryGrid.enabled: ""
  monitor.httpCollector.ingress.enabled: "false"
  monitor.httpCollector.ingress.host: ""
  monitor.httpCollector.ingress.ssl.enabled: "false"
  # monitor_application_container_dockerstats properties
  monitor.container.dockerstats.enabled: "true"
  monitor.container.dockerstats.daemonset.privileged: "true"
  monitor.container.dockerstats.daemonset.enableAllContainerRuntimes: "true"
  monitor.container.dockerstats.daemonset.apmia.epagent.port: "8889"
  monitor.container.dockerstats.daemonset.limits.cpu: "1"
  monitor.container.dockerstats.daemonset.limits.memory: "1024Mi"
  monitor.container.dockerstats.daemonset.requests.cpu: "200m"
  monitor.container.dockerstats.daemonset.requests.memory: "300Mi"
  monitor.node.enabled: "false"
  # monitor_kafka
  monitor.kafka.enabled: "true"
  monitor.kafka.consumergroups.metrics: "true"
  monitor.kafka.consumergroups.filter: ""
  monitor.kafka.jmx.modules: "kafka,jvm,memory"
  monitor.kafka.jmx.MBeans.include: ""
  monitor.kafka.jmx.MBeans.exclude: ""
  monitor.kafka.deployAsDaemonSet: "false"
  monitor.kafka.debug: "false"
  monitor.kafka.ssl.client.enabled: "false"
  monitor.kafka.ssl.client.truststoreKey.value: ""
  monitor.kafka.ssl.client.truststoreKey.valueFrom.secretKeyRef.name: ""
  monitor.kafka.ssl.client.truststoreKey.valueFrom.secretKeyRef.key: ""
  monitor.kafka.ssl.client.truststorePassword.value: ""
  monitor.kafka.ssl.client.keystoreKey.value: ""
  monitor.kafka.ssl.client.keystoreKey.valueFrom.secretKeyRef.name: ""
  monitor.kafka.ssl.client.keystoreKey.valueFrom.secretKeyRef.key: ""
  monitor.kafka.ssl.client.keystorePassword.value: ""
  monitor.kafka.ssl.client.keystoreKeyPassword.value: ""
  monitor.kafka.ssl.client.endpointIdentificationEnabled: "false"
  monitor.kafka.ssl.jmx.enabled: "false"
  monitor.kafka.ssl.jmx.truststoreKey.value: ""
  monitor.kafka.ssl.jmx.truststoreKey.valueFrom.secretKeyRef.name: ""
  monitor.kafka.ssl.jmx.truststoreKey.valueFrom.secretKeyRef.key: ""
  monitor.kafka.ssl.jmx.truststorePassword.value: ""
  monitor.kafka.ssl.jmx.keystoreKey.value: ""
  monitor.kafka.ssl.jmx.keystoreKey.valueFrom.secretKeyRef.name: ""
  monitor.kafka.ssl.jmx.keystoreKey.valueFrom.secretKeyRef.key: ""
  monitor.kafka.ssl.jmx.keystorePassword.value: ""
  monitor.kafka.sasl.plain.enabled: "false"
  monitor.kafka.sasl.plain.username: ""
  monitor.kafka.sasl.plain.password: ""
  monitor.kafka.limits.cpu: "1"
  monitor.kafka.limits.memory: "1024Mi"
  monitor.kafka.requests.cpu: "200m"
  monitor.kafka.requests.memory: "300Mi"
  # globalDeployment properties
  globalDeployment.mTLS: "true"
  globalDeployment.caCert: ""
  globalDeployment.serverCert: ""
  globalDeployment.clientCert: ""
  globalDeployment.serverKey: ""
  globalDeployment.clientKey: ""
  globalDeployment.mixedMode: "true"
---
# Source: uma/templates/configmap_probeautoattach.yaml
# Probe AutoAttach secrets and config map
apiVersion: v1
kind: ConfigMap
metadata:
  name: caaiops-probe-autoattach-configmap
  namespace: dx-uma
data:
  probeTlsEnabled: "true"
  manageWebhookCaBundle: "false"
  probe-supported-types: nodejs,php,java,nginx
  nodejs-sidecarconfig.yaml: |
    containers:
    - name: nodejs-probe-autoattach
      image: caapm/universalmonitoragent:24.10.1.10
      imagePullPolicy: IfNotPresent
      command: ["sh", "-c", "/nodeprobe/probe-init-script.sh"]
      volumeMounts:
      - name: probe-shared-files
        mountPath: /nodejs-probe
    - name: application-container
      env:
      - name: COLLECTOR_AGENT_HOST
        valueFrom:
          fieldRef:
            fieldPath: spec.nodeName
      - name: COLLECTOR_AGENT_PORT
        value: "5005"
      - name: CA_APM_PROBENAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      - name: CA_APM_APPNAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      - name: LOG_ENABLE_CONSOLE_MODE
        value: "enabled"
      command: ["sh", "-c", "sh /nodejs-probe/nodeprobe/probe-script.sh"]
      volumeMounts:
      - name: probe-shared-files
        mountPath: /nodejs-probe
    volumes:
    - name: probe-shared-files
      emptyDir: {}
  php-sidecarconfig.yaml: |
    containers:
    - name: php-probe-autoattach
      image: caapm/universalmonitoragent:24.10.1.10
      imagePullPolicy: IfNotPresent
      command: ["sh", "-c", "/phpprobe/php-probe-init.sh"]
      volumeMounts:
      - name: phpprobe-shared-files
        mountPath: /php-probe
    - name: application-container
      env:
      - name: COLLECTOR_AGENT_HOST
        valueFrom:
          fieldRef:
            fieldPath: status.hostIP
      - name: COLLECTOR_AGENT_PORT
        value: "5005"
      - name: CA_APM_APPNAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      - name: APMENV_WILY_PHP_AGENT_PROGRAMNAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      command: ["sh", "-c", "sh /php-probe/php-probe.sh"]
      volumeMounts:
      - name: phpprobe-shared-files
        mountPath: /php-probe
    volumes:
    - name: phpprobe-shared-files
      emptyDir: {}
  mule-sidecarconfig.yaml: |
    containers:
    - name: mule-probe-autoattach
      image: caapm/universalmonitoragent:24.10.1.10
      imagePullPolicy: IfNotPresent
      command: ["sh", "-c", "/javaprobe/mule-apm-init-script.sh"]
      volumeMounts:
      - name: apm-agent-shared-files
        mountPath: /apm-agent
    - name: application-container
      env:
      - name: apmenv_introscope_agent_hostName
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      - name: JAVAAGENT
        value: "-javaagent:/apm-agent/wily/Agent.jar"
      - name: AGENTPROFILE
        value: "-Dcom.wily.introscope.agentProfile=/apm-agent/wily/core/config/IntroscopeAgentMicro.profile"
      command: ["sh", "-c", "sh /apm-agent/javaprobe/mule-apm-start-script.sh"]
      volumeMounts:
      - name: apm-agent-shared-files
        mountPath: /apm-agent
    volumes:
    - name: apm-agent-shared-files
      emptyDir: {}
  java-sidecarconfig.yaml: |
    containers:
    - name: java-agent-autoattach
      image: caapm/universalmonitoragent:24.10.1.10
      imagePullPolicy: IfNotPresent
      command: ["sh", "-c", "/javaprobe/java-apm-init-script.sh"]
      volumeMounts:
      - name: java-agent-shared-files
        mountPath: /java-agent
    - name: application-container
      env:
      volumeMounts:
      - name: java-agent-shared-files
        mountPath: /java-agent
    volumes:
    - name: java-agent-shared-files
      emptyDir: {}
  nginx-sidecarconfig.yaml: |
    containers:
    - name: nginx-probe-autoattach
      image: caapm/universalmonitoragent:24.10.1.10
      imagePullPolicy: IfNotPresent
      command: ["sh", "-c", "/nginxprobe/nginx-probe-init.sh"]
      volumeMounts:
      - name: nginxprobe-shared-files
        mountPath: /nginx-probe
    - name: application-container
      env:
      - name: COLLECTOR_AGENT_HOST
        valueFrom:
          fieldRef:
            fieldPath: status.hostIP
      - name: COLLECTOR_AGENT_PORT
        value: "5005"
      - name: CA_APM_PROBENAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      - name: NGINX_PROBE_PATH
        value: "/nginx-probe"
      command: ["sh", "-c", "sh /nginx-probe/nginx-probe.sh"]
      volumeMounts:
      - name: nginxprobe-shared-files
        mountPath: /nginx-probe
    volumes:
    - name: nginxprobe-shared-files
      emptyDir: {}
---
# Source: uma/templates/configmap_prometheus.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: caaiops-config-prometheus
  namespace: dx-uma
data:
  kubernetes_remote_monitoring_bundles_load: KubernetesRemoteMonitor
  default.properties: |
    prometheus.metric.sets: set1
    prometheus.metric.set1.metric_list:
    prometheus.metric.set1.apm_metric_path:
    prometheus.metric.set1.apm_metric_type: LongFluctuating
    prometheus.metric.set1.aggregation_type: sum
    additional.label.sets:
  cadvisor.properties: |
    prometheus.metric.sets:set1,set2
    prometheus.metric.set1.metric_list: container_cpu_usage_seconds_total,container_spec_cpu_quota,container_spec_cpu_period,container_cpu_usage_seconds_total,container_cpu_cfs_throttled_seconds_total,container_memory_working_set_bytes,container_memory_max_usage_bytes,container_memory_failcnt,container_fs_writes_bytes_total,container_fs_reads_bytes_total,container_network_receive_bytes_total,container_network_transmit_bytes_total,container_network_receive_packets_dropped_total,container_network_transmit_packets_dropped_total
    prometheus.metric.set1.apm_metric_path: Openshift|Namespaces|(aiops_namespace)|Pods|(aiops_pod)|Containers|(aiops_container), Openshift|Namespaces|(aiops_namespace)|Pods|(aiops_pod)
    prometheus.metric.set1.apm_metric_type: LongFluctuating
    prometheus.metric.set1.aggregation_type: sum
    prometheus.metric.set2.metric_list: container_spec_memory_limit_bytes
    prometheus.metric.set2.apm_metric_path: Openshift|Namespaces|(aiops_namespace)|Pods|(aiops_pod)|Containers|(aiops_container):container_memory_max_usage_bytes, Openshift|Namespaces|(aiops_namespace)|Pods|(aiops_pod):container_memory_max_usage_bytes
    prometheus.metric.set2.apm_metric_type: LongFluctuating
    prometheus.metric.set2.aggregation_type: sum
    additional.label.sets: set1, set2, set3, set4, set5, set6
    additional.label.set1.source_label: namespace
    additional.label.set1.target_label: aiops_namespace
    additional.label.set2.source_label: pod_name
    additional.label.set2.target_label: aiops_pod
    additional.label.set3.source_label: container_name
    additional.label.set3.target_label: aiops_container
    additional.label.set4.source_label: container_label_io_kubernetes_pod_namespace
    additional.label.set4.target_label: aiops_namespace
    additional.label.set5.source_label: container_label_io_kubernetes_pod_name
    additional.label.set5.target_label: aiops_pod
    additional.label.set6.source_label: container_label_io_kubernetes_container_name
    additional.label.set6.target_label: aiops_container
  node-exporter.properties: |
    prometheus.metric.sets: cpu,cpuload,memavailable,memtotal,diskreadwrite,filesystemfree,filesystemsize,network
    prometheus.metric.cpu.metric_list: node_cpu_seconds_total
    prometheus.metric.cpu.apm_metric_path: Prometheus|(node_name)|CPU:total_(aiops_mode)_node_cpu_seconds_total,Prometheus|(node_name)|CPU|Core(aiops_cpu):(aiops_mode)_node_cpu_seconds_total
    prometheus.metric.cpu.apm_metric_type:LongFluctuating
    prometheus.metric.cpu.aggregation_type:sum
    prometheus.metric.cpuload.metric_list:node_load1
    prometheus.metric.cpuload.apm_metric_path:Prometheus|(node_name)|CPU
    prometheus.metric.memavailable.metric_list:node_memory_MemAvailable_bytes, node_memory_MemAvailable
    prometheus.metric.memavailable.apm_metric_path:Prometheus|(node_name)|Memory:node_memory_MemAvailable_bytes
    prometheus.metric.memtotal.metric_list:node_memory_MemTotal_bytes,node_memory_MemTotal
    prometheus.metric.memtotal.apm_metric_path:Prometheus|(node_name)|Memory:node_memory_MemTotal_bytes
    prometheus.metric.diskreadwrite.metric_list:node_disk_written_bytes_total,node_disk_read_bytes_total
    prometheus.metric.diskreadwrite.apm_metric_path:Prometheus|(node_name)|Storage
    prometheus.metric.filesystemfree.metric_list:node_filesystem_free_bytes
    prometheus.metric.filesystemfree.apm_metric_path:Prometheus|(node_name)|Storage:(aiops_device)_node_filesystem_free_bytes, Prometheus|(node_name)|Storage|(aiops_device)|(mountpoint)
    prometheus.metric.filesystemsize.metric_list:node_filesystem_size_bytes
    prometheus.metric.filesystemsize.apm_metric_path:Prometheus|(node_name)|Storage:(aiops_device)_node_filesystem_size_bytes, Prometheus|(node_name)|Storage|(aiops_device)|(mountpoint)
    prometheus.metric.network.metric_list:node_network_receive_bytes_total, node_network_transmit_bytes_total, node_network_receive_drop_total, node_network_transmit_drop_total, node_network_transmit_errs_total, node_network_receive_errs_total
    prometheus.metric.network.apm_metric_path:Prometheus|(node_name)|Network, Prometheus|(node_name)|Network|(aiops_device)
    additional.label.sets:set1, set2, set3, set4
    additional.label.set1.source_label:device
    additional.label.set1.target_label:aiops_device
    additional.label.set2.source_label:cpu
    additional.label.set2.target_label:aiops_cpu
    additional.label.set3.source_label:mode
    additional.label.set3.target_label:aiops_mode
  kube-state-metric.properties: |
    prometheus.metric.sets=node_status,pod_status,unschedulable_node,container_status, container_waiting_reason,node_allocatable_capacity, daemonset1,daemonset2, deployments1, deployments2
    prometheus.metric.node_status.metric_list=kube_node_status_condition
    prometheus.metric.node_status.apm_metric_path=Openshift|Nodes:kube_node_status_condition_(condition)_(status), Openshift|Nodes|(node):kube_node_status_condition_(condition)_(status)
    prometheus.metric.pod_status.metric_list=kube_pod_status_phase
    prometheus.metric.pod_status.apm_metric_path=Openshift|Namespaces:kube_pod_status_phase_(phase), Openshift|Namespaces|(namespace):kube_pod_status_phase_(phase)
    prometheus.metric.unschedulable_node.metric_list=kube_node_spec_unschedulable
    prometheus.metric.unschedulable_node.apm_metric_path=Openshift|Nodes, Openshift|Nodes|(node)
    prometheus.metric.container_status.metric_list=kube_pod_container_status_running, kube_pod_container_status_terminated, kube_pod_container_status_waiting
    prometheus.metric.container_status.apm_metric_path=Openshift|Namespaces, Openshift|Namespaces|(namespace)
    prometheus.metric.container_waiting_reason.metric_list=kube_pod_container_status_waiting_reason
    prometheus.metric.container_waiting_reason.apm_metric_path=Openshift|Namespaces|(namespace)|kube_pod_container_status_waiting_reason:(reason)
    prometheus.metric.node_allocatable_capacity.metric_list=kube_node_status_allocatable_cpu_cores, kube_node_status_capacity_cpu_cores, kube_node_status_allocatable_memory_bytes , kube_node_status_capacity_memory_bytes, kube_node_status_allocatable_pods, kube_node_status_capacity_pods
    prometheus.metric.node_allocatable_capacity.apm_metric_path=Openshift|Nodes, Openshift|Nodes|(node)
    prometheus.metric.daemonset1.metric_list=kube_daemonset_status_number_unavailable, kube_daemonset_status_desired_number_scheduled, kube_daemonset_status_number_available, kube_daemonset_updated_number_scheduled
    prometheus.metric.daemonset1.apm_metric_path=Openshift|Namespaces|(namespace), Openshift|Namespaces|(namespace)|kube_daemonset|(daemonset)
    prometheus.metric.daemonset2.metric_list=kube_daemonset_status_number_misscheduled, kube_daemonset_status_current_number_scheduled, kube_daemonset_status_number_ready
    prometheus.metric.daemonset2.apm_metric_path=Openshift|Namespaces, Openshift|Namespaces|(namespace), Openshift|Namespaces|(namespace)|kube_daemonset|(daemonset)
    prometheus.metric.deployments1.metric_list=kube_deployment_status_replicas, kube_deployment_status_replicas_available, kube_deployment_status_replicas_unavailable
    prometheus.metric.deployments1.apm_metric_path=Openshift|Namespaces, Openshift|Namespaces|(namespace), Openshift|Namespaces|(namespace)|kube_deployment|(deployment)
    prometheus.metric.deployments2.metric_list=kube_deployment_status_replicas_updated, kube_deployment_status_observed_generation, kube_deployment_spec_replicas, kube_deployment_spec_paused
    prometheus.metric.deployments2.apm_metric_path=Openshift|Namespaces|(namespace), Openshift|Namespaces|(namespace)|kube_deployment|(deployment)
  # NOTE(review): the source view is truncated inside haproxy-kube.properties; the
  # trailing comma below is reproduced from the original, which continues past this chunk.
  haproxy-kube.properties: |
    prometheus.metric.sets:frontend, backend, server, frontend_status, backend_status, server_status,
frontend_http_responses, backend_http_responses, server_http_responses, process, process_time, process_version prometheus.metric.backend.metric_list:haproxy_backend_bytes_in_total, haproxy_backend_bytes_out_total, haproxy_backend_sessions_total, haproxy_backend_current_sessions, haproxy_backend_max_session_rate, haproxy_backend_connections_rate_max, haproxy_backend_connection_errors_total, haproxy_backend_current_queue, haproxy_backend_queue_time_average_seconds, haproxy_backend_response_time_average_seconds, haproxy_backend_agg_server_check_status prometheus.metric.backend.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Backends|(proxy) prometheus.metric.backend_status.metric_list:haproxy_backend_status prometheus.metric.backend_status.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Backends|(proxy):haproxy_backend_status_(state) prometheus.metric.backend_http_responses.metric_list:haproxy_backend_http_responses_total prometheus.metric.backend_http_responses.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Backends|(proxy)|HttpResponseCode|(code) prometheus.metric.frontend.metric_list:haproxy_frontend_bytes_in_total, haproxy_frontend_bytes_out_total, haproxy_frontend_sessions_total, haproxy_frontend_current_sessions, haproxy_frontend_max_sessions, haproxy_frontend_max_session_rate, haproxy_frontend_connections_total, haproxy_frontend_connections_rate_max prometheus.metric.frontend.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Frontends|(proxy) prometheus.metric.frontend_status.metric_list:haproxy_frontend_status prometheus.metric.frontend_status.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Frontends|(proxy):haproxy_frontend_status_(state) prometheus.metric.frontend_http_responses.metric_list:haproxy_frontend_http_responses_total prometheus.metric.frontend_http_responses.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Frontends|(proxy)|HttpResponseCode|(code) prometheus.metric.server.metric_list:haproxy_server_bytes_in_total, 
haproxy_server_bytes_out_total, haproxy_server_check_failures_total, haproxy_server_connection_errors_total, haproxy_server_connections_total, haproxy_server_current_queue, haproxy_server_current_session_rate, haproxy_server_current_sessions, haproxy_server_downtime_seconds_total, haproxy_server_http_average_response_latency_milliseconds, haproxy_server_max_session_rate, haproxy_server_response_errors_total prometheus.metric.server.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Services|(proxy)|Server|(server) prometheus.metric.server_status.metric_list:haproxy_server_status prometheus.metric.server_status.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Services|(proxy)|Server|(server):haproxy_server_status_(state) prometheus.metric.server_http_responses.metric_list:haproxy_server_http_responses_total prometheus.metric.server_http_responses.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Services|(proxy)|Server|(server)|HttpResponseCode|(code) prometheus.metric.process.metric_list:haproxy_process_bytes_out_total, haproxy_process_connections_total, haproxy_process_max_connections, haproxy_process_current_connections, haproxy_process_current_run_queue, haproxy_process_requests_total prometheus.metric.process.apm_metric_path:Openshift|HAProxyRouter|(scrap_url) prometheus.metric.process_version.metric_list:haproxy_process_build_info prometheus.metric.process_version.apm_metric_path:Openshift|HAProxyRouter|(scrap_url):haproxy_process_build_info_(version) prometheus.metric.process_version.apm_metric_type=VersionConst prometheus.metric.process_time.metric_list:haproxy_process_start_time_seconds prometheus.metric.process_time.apm_metric_path:Openshift|HAProxyRouter|(scrap_url):haproxy_process_start_time prometheus.metric.process_time.apm_metric_type=TimeFluctuating haproxy-ose.properties: | prometheus.metric.sets:frontend, backend, server, frontend_http_responses, backend_http_responses, server_http_responses, process, process_time, process_version 
prometheus.metric.backend.metric_list:haproxy_backend_bytes_in_total, haproxy_backend_bytes_out_total, haproxy_backend_sessions_total, haproxy_backend_current_sessions, haproxy_backend_max_session_rate, haproxy_backend_connections_total, haproxy_backend_connection_errors_total, haproxy_backend_current_queue, haproxy_backend_up prometheus.metric.backend.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Backends|(backend)|Projects|(namespace)|Services|(route) prometheus.metric.backend_http_responses.metric_list:haproxy_backend_http_responses_total prometheus.metric.backend_http_responses.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Backends|(backend)|Projects|(namespace)|Services|(route)|HttpResponseCode|(code) prometheus.metric.frontend.metric_list:haproxy_frontend_bytes_in_total, haproxy_frontend_bytes_out_total, haproxy_frontend_current_sessions, haproxy_frontend_max_sessions, haproxy_frontend_max_session_rate, haproxy_frontend_connections_total prometheus.metric.frontend.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Frontends|(frontend) prometheus.metric.frontend_http_responses.metric_list:haproxy_frontend_http_responses_total prometheus.metric.frontend_http_responses.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Frontends|(frontend)|HttpResponseCode|(code) prometheus.metric.server.metric_list:haproxy_server_bytes_in_total, haproxy_server_bytes_out_total, haproxy_server_check_failures_total, haproxy_server_connection_errors_total, haproxy_server_connections_total, haproxy_server_current_queue, haproxy_server_current_session_rate, haproxy_server_current_sessions, haproxy_server_downtime_seconds_total, haproxy_server_http_average_response_latency_milliseconds, haproxy_server_max_session_rate, haproxy_server_up, haproxy_server_response_errors_total prometheus.metric.server.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Projects|(namespace)|Services|(service)|Pods|(pod)|Server|(server) 
prometheus.metric.server_http_responses.metric_list:haproxy_server_http_responses_total prometheus.metric.server_http_responses.apm_metric_path:Openshift|HAProxyRouter|(scrap_url)|Projects|(namespace)|Services|(service)|Pods|(pod)|Server|(server)|HttpResponseCode|(code) prometheus.metric.process.metric_list:haproxy_process_bytes_out_total, haproxy_process_connections_total, haproxy_process_max_connections, haproxy_process_current_connections, haproxy_process_current_run_queue, haproxy_process_requests_total prometheus.metric.process.apm_metric_path:Openshift|HAProxyRouter|(scrap_url) prometheus.metric.process_version.metric_list:haproxy_process_build_info prometheus.metric.process_version.apm_metric_path:Openshift|HAProxyRouter|(scrap_url):haproxy_process_build_info_(version) prometheus.metric.process_version.apm_metric_type=VersionConst prometheus.metric.process_time.metric_list:haproxy_process_start_time_seconds prometheus.metric.process_time.apm_metric_path:Openshift|HAProxyRouter|(scrap_url) prometheus.metric.process_time.apm_metric_type=TimeFluctuating coredns.properties: | prometheus.metric.sets=build,cache,counter,request,response,forward,panic prometheus.metric.build.metric_list=coredns_build_info prometheus.metric.build.apm_metric_path=Openshift|Coredns|Build_info|(goversion)|Revision|(revision)|Version|(version) prometheus.metric.cache.metric_list=coredns_cache_misses_total prometheus.metric.cache.apm_metric_path=Openshift|Coredns|Cache|Server|(server) prometheus.metric.counter.metric_list=coredns_dns_request_count_total prometheus.metric.counter.apm_metric_path=Openshift|Coredns|Request|Family|(proto)|Server|(server)|zone-(zone) prometheus.metric.request.metric_list=coredns_dns_request_type_count_total prometheus.metric.request.apm_metric_path=Openshift|Coredns|Request|Server|(server)|Query_type|(type)|Zone-(zone) prometheus.metric.response.metric_list=coredns_dns_response_rcode_count_total 
prometheus.metric.response.apm_metric_path=Openshift|Coredns|Response|(rcode)|Server|(server)|Zone-(zone) prometheus.metric.forward.metric_list=coredns_forward_healthcheck_failure_count_total,coredns_forward_sockets_open prometheus.metric.forward.apm_metric_path=Openshift|Coredns|Forward|Destination_IP|(to) prometheus.metric.panic.metric_list=coredns_panic_count_total prometheus.metric.panic.apm_metric_path=Openshift|Coredns additional.label.sets=set1 additional.label.set1.source_label=family additional.label.set1.target_label=Family etcd.properties: | prometheus.metric.sets=server,disk,network,debugging,grpc,snapdb,process,mvcc,go prometheus.metric.server.metric_list=etcd_server_go_version, etcd_server_has_leader, etcd_server_health_failures, etcd_server_health_success, etcd_server_heartbeat_send_failures_total, etcd_server_id, etcd_server_is_leader,etcd_server_leader_changes_seen_total, etcd_server_proposals_applied_total, etcd_server_proposals_committed_total, etcd_server_proposals_failed_total, etcd_server_proposals_pending, etcd_server_quota_backend_bytes,etcd_server_read_indexes_failed_total, etcd_server_slow_apply_total, etcd_server_slow_read_indexes_total, etcd_server_snapshot_apply_in_progress_total, etcd_server_version prometheus.metric.server.apm_metric_path=Openshift|etcd|server prometheus.metric.disk.metric_list=etcd_disk_backend_commit_duration_seconds, etcd_disk_backend_defrag_duration_seconds, etcd_disk_backend_snapshot_duration_seconds, etcd_disk_wal_fsync_duration_seconds prometheus.metric.disk.apm_metric_path=Openshift|etcd|disk prometheus.metric.network.metric_list=etcd_network_client_grpc_received_bytes_total, etcd_network_client_grpc_sent_bytes_total, etcd_network_peer_received_bytes_total, etcd_network_peer_received_failures_total, etcd_network_peer_round_trip_time_seconds, etcd_network_peer_sent_bytes_total, etcd_network_peer_sent_failures_total prometheus.metric.network.apm_metric_path=Openshift|etcd|network 
prometheus.metric.debugging.metric_list=etcd_debugging_mvcc_compact_revision, etcd_debugging_mvcc_current_revision, etcd_debugging_mvcc_db_compaction_keys_total, etcd_debugging_mvcc_db_compaction_pause_duration_milliseconds, etcd_debugging_mvcc_db_compaction_total_duration_milliseconds, etcd_debugging_mvcc_db_total_size_in_bytes, etcd_debugging_mvcc_delete_total, etcd_debugging_mvcc_events_total, etcd_debugging_mvcc_index_compaction_pause_duration_milliseconds, etcd_debugging_mvcc_keys_total, etcd_debugging_mvcc_pending_events_total, etcd_debugging_mvcc_put_total, etcd_debugging_mvcc_range_total, etcd_debugging_mvcc_slow_watcher_total, etcd_debugging_mvcc_total_put_size_in_bytes, etcd_debugging_mvcc_txn_total, etcd_debugging_mvcc_watch_stream_total, etcd_debugging_mvcc_watcher_total, etcd_debugging_server_lease_expired_total, etcd_debugging_snap_save_marshalling_duration_seconds, etcd_debugging_snap_save_total_duration_seconds, etcd_debugging_store_expires_total, etcd_debugging_store_reads_total, etcd_debugging_store_watch_requests_total, etcd_debugging_store_watchers, etcd_debugging_store_writes_total prometheus.metric.debugging.apm_metric_path=Openshift|etcd|debugging prometheus.metric.grpc.metric_list=etcd_grpc_proxy_cache_hits_total, etcd_grpc_proxy_cache_keys_total, etcd_grpc_proxy_cache_misses_total, etcd_grpc_proxy_events_coalescing_total, etcd_grpc_proxy_watchers_coalescing_total prometheus.metric.grpc.apm_metric_path=Openshift|etcd|grpc prometheus.metric.snapdb.metric_list=etcd_snap_db_save_total_duration_seconds,etcd_snap_db_fsync_duration_seconds prometheus.metric.snapdb.apm_metric_path=Openshift|etcd|snapshot prometheus.metric.process.metric_list=process_open_fds, process_max_fds, process_cpu_seconds_total, process_resident_memory_bytes, process_start_time_seconds, process_virtual_memory_bytes prometheus.metric.process.apm_metric_path=Openshift|etcd|process prometheus.metric.mvcc.metric_list=etcd_mvcc_db_total_size_in_bytes, 
etcd_mvcc_db_total_size_in_use_in_bytes, etcd_mvcc_hash_duration_seconds, etcd_mvcc_hash_rev_duration_seconds prometheus.metric.mvcc.apm_metric_path=Openshift|etcd|mvcc apiserver.properties: | prometheus.metric.sets=apiserver,watchers,root,apiservices,etcd,threads,memory,process,clients,workqueue,etcdobj,lable4,lable5 prometheus.metric.apiserver.metric_list=apiserver_response_sizes,apiserver_response_sizes_sum,apiserver_response_sizes_count,apiserver_request_count,apiserver_longrunning_gauge,apiserver_request_duration_seconds,apiserver_request_duration_seconds_sum,apiserver_request_duration_seconds_count,apiserver_request_latencies,apiserver_request_latencies_sum,apiserver_request_latencies_count,apiserver_request_total prometheus.metric.apiserver.apm_metric_path=Openshift|apiserver|(scope)|(resource)|(verb) prometheus.metric.apiserver.apm_metric_type=LongFluctuating prometheus.metric.apiserver.aggregation_type=sum prometheus.metric.watchers.metric_list=apiserver_registered_watchers,apiserver_watch_events_sizes,apiserver_watch_events_sizes_sum,apiserver_watch_events_sizes_count,apiserver_watch_events_total,apiserver_registered_watchers,apiserver_watch_events_total prometheus.metric.watchers.apm_metric_path=Openshift|apiserver|kinds|(kind) prometheus.metric.watchers.apm_metric_type=LongFluctuating prometheus.metric.watchers.aggregation_type=sum prometheus.metric.root.metric_list=APIServiceOpenAPIAggregationControllerQueue1_adds,APIServiceOpenAPIAggregationControllerQueue1_depth, APIServiceOpenAPIAggregationControllerQueue1_queue_latency, APIServiceOpenAPIAggregationControllerQueue1_retries,APIServiceOpenAPIAggregationControllerQueue1_work_duration,APIServiceRegistrationController_adds, APIServiceRegistrationController_depth, 
APIServiceRegistrationController_queue_latency,APIServiceRegistrationController_work_duration,AvailableConditionController_adds,AvailableConditionController_depth,AvailableConditionController_retries,AvailableConditionController_queue_latency,AvailableConditionController_work_duration,DiscoveryController_adds,DiscoveryController_work_duration,admission_quota_controller_work_duration,admission_quota_controller_queue_latency,apiserver_current_inflight_requests,apiserver_request_duration_seconds,apiserver_request_duration_seconds_sum,apiserver_request_duration_seconds_count prometheus.metric.root.apm_metric_path=Openshift|apiserver prometheus.metric.root.apm_metric_type=LongFluctuating prometheus.metric.root.aggregation_type=sum prometheus.metric.apiservices.metric_list=apiserver_storage_data_key_generation_failures_total,apiserver_storage_envelope_transformation_cache_misses_total,apiserver_watch_events_sizes,apiserver_watch_events_sizes_sum,apiserver_watch_events_sizes_count prometheus.metric.apiservices.apm_metric_path=Openshift|apiserver|apiservices prometheus.metric.apiservices.apm_metric_type=LongFluctuating prometheus.metric.apiservices.aggregation_type=sum prometheus.metric.etcd.metric_list=etcd_request_duration_seconds,etcd_request_duration_seconds_sum,etcd_request_duration_seconds_count prometheus.metric.etcd.apm_metric_path=Openshift|etcd|(operation)|(type) prometheus.metric.etcd.apm_metric_type=LongFluctuating prometheus.metric.etcd.aggregation_type=sum prometheus.metric.threads.metric_list=go_goroutines,go_threads prometheus.metric.threads.apm_metric_path=Openshift|apiserver|threads prometheus.metric.memory.metric_list=go_gc_duration_seconds,go_gc_duration_seconds_sum,go_gc_duration_seconds_count,go_memstats_alloc_bytes,go_memstats_frees_total,go_memstats_heap_alloc_bytes,go_memstats_heap_idle_bytes,go_memstats_heap_inuse_bytes,go_memstats_heap_objects,go_memstats_alloc_bytes_total prometheus.metric.memory.apm_metric_path=Openshift|apiserver|memory 
prometheus.metric.process.metric_list=process_virtual_memory_bytes,process_resident_memory_bytes,process_max_fds,process_open_fds prometheus.metric.process.apm_metric_path=Openshift|apiserver|process prometheus.metric.clients.metric_list=grpc_client_handled_total,rest_client_request_duration_seconds,rest_client_request_duration_seconds_sum,rest_client_request_duration_seconds_count,rest_client_requests_total,rest_client_requests_total_sum,rest_client_requests_total_count prometheus.metric.clients.apm_metric_path=Openshift|apiserver|clients prometheus.metric.workqueue.metric_list=autoregister_adds,autoregister_depth,autoregister_queue_latency,autoregister_retries,autoregister_work_duration,controller_clusterquotamappingcontroller_clusterquotas_adds,controller_clusterquotamappingcontroller_clusterquotas_depth,controller_clusterquotamappingcontroller_clusterquotas_queue_latency,controller_clusterquotamappingcontroller_clusterquotas_retries,controller_clusterquotamappingcontroller_clusterquotas_work_duration,controller_clusterquotamappingcontroller_namespaces_adds,controller_clusterquotamappingcontroller_namespaces_depth,controller_clusterquotamappingcontroller_namespaces_queue_latency,controller_clusterquotamappingcontroller_namespaces_retries,controller_clusterquotamappingcontroller_namespaces_work_duration,crdEstablishing_adds,crdEstablishing_depth,crdEstablishing_queue_latency,crdEstablishing_retries,workqueue_queue_duration_seconds,workqueue_queue_duration_seconds_sum,workqueue_queue_duration_seconds_count,workqueue_work_duration_seconds,workqueue_work_duration_seconds_sum,workqueue_work_duration_seconds_count prometheus.metric.workqueue.apm_metric_path=Openshift|apiserver|workqueue prometheus.metric.workqueue.apm_metric_type=LongFluctuating prometheus.metric.workqueue.aggregation_type=sum 
prometheus.metric.lable4.metric_list=workqueue_adds_total,workqueue_depth,workqueue_longest_running_processor_seconds,workqueue_retries_total,workqueue_queue_duration_seconds_bucket,workqueue_unfinished_work_seconds prometheus.metric.lable4.apm_metric_path=Openshift|apiserver|workqueue|(name) prometheus.metric.etcdobj.metric_list=etcd_object_counts,apiserver_storage_object_counts prometheus.metric.etcdobj.apm_metric_path=Openshift|etcd|(resource) prometheus.metric.etcdobj.apm_metric_type=LongFluctuating prometheus.metric.etcdobj.aggregation_type=sum prometheus.metric.lable5.metric_list=grpc_client_handled_total,grpc_client_msg_sent_total,grpc_client_msg_received_total,grpc_client_started_total prometheus.metric.lable5.apm_metric_path=Openshift|apiserver|clients|grpc|method|(grpc_method)|service|(grpc_service)|type|(grpc_type) --- # Source: uma/templates/webhook_probeautoattach.yaml # Config Map for Dynamic Webhook creation or update caBundle by apm-probe-autoattach apiVersion: v1 kind: ConfigMap metadata: name: caaiops-probe-webhook-configmap namespace: dx-uma data: webhook-namespace: dx-uma webhook-version: "v1beta1" --- # Source: uma/templates/role.yaml apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: broadcom-uma-clusterrole rules: - nonResourceURLs: - /metrics verbs: - get - apiGroups: - operators.coreos.com - monitoring.coreos.com - discovery.k8s.io - node.k8s.io - policy resources: - '*' verbs: - get - list - watch - update - apiGroups: - "" resources: - nodes - nodes/stats - nodes/metrics - nodes/proxy - services - pods - events - pods/exec - resourcequotas - replicationcontrollers - limitranges - namespaces - persistentvolumes - persistentvolumeclaims - endpoints - ingresses - prometheuses - configmaps verbs: - get - update - list - watch - apiGroups: - apps - admissionregistration.k8s.io resources: - deployments - statefulsets - replicasets - daemonsets - cronjobs - jobs - mutatingwebhookconfigurations verbs: - get - list - 
watch - update - apiGroups: - extensions - batch resources: - deployments - statefulsets - replicasets - daemonsets - cronjobs - jobs - ingresses - namespaces verbs: - get - list - watch - update - apiGroups: - "" resources: - secrets verbs: - get - list - update - apiGroups: - extensions - "networking.k8s.io" # k8s 1.14+ resources: - ingresses verbs: - get - list - watch - apiGroups: - ca.broadcom.com resources: - '*' verbs: - '*' - apiGroups: - security.openshift.io resources: - securitycontextconstraints resourceNames: - anyuid - privileged verbs: - use - apiGroups: - extensions resources: - podsecuritypolicies resourceNames: - ibm-privileged-psp verbs: - use - apiGroups: - apps.openshift.io resources: - deploymentconfigs verbs: - get - list - watch - update --- # Source: uma/templates/role_binding.yaml # ClusterRoleBinding for binding with Service Account apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: broadcom-uma-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: broadcom-uma-clusterrole subjects: - kind: ServiceAccount name: uma namespace: dx-uma --- # Source: uma/templates/deployment_apmia.yaml apiVersion: v1 kind: Service metadata: name: opentracing namespace: dx-uma spec: selector: app: apmia-deployment ports: - port: 8888 protocol: TCP targetPort: 8888 type: ClusterIP --- # Source: uma/templates/deployment_httpcollector.yaml apiVersion: v1 kind: Service metadata: name: apmia-http-collector-web namespace: dx-uma labels: app: apmia-http-collector spec: clusterIP: None selector: app: apmia-http-collector ports: - name: hc-port-1 port: 8085 protocol: TCP targetPort: 8085 --- # Source: uma/templates/deployment_probeautoattach.yaml apiVersion: v1 kind: Service metadata: name: apm-probe-autoattach-svc namespace: dx-uma labels: app: apm-probe-autoattach spec: ports: - port: 443 targetPort: 8443 type: ClusterIP selector: app: apm-probe-autoattach --- # Source: 
uma/templates/service_acc.yaml # Service for acc controller apiVersion: v1 kind: Service metadata: name: controller namespace: dx-uma spec: selector: module: acc-controller ports: - port: 9090 type: ClusterIP --- # Source: uma/templates/service_clusterdiscovery.yaml # Service for Cluster Discovery apiVersion: v1 kind: Service metadata: name: clusterinfo namespace: dx-uma spec: selector: module: clusterinfo ports: - name: https port: 8443 - name: http port: 8080 - name: httpactu port: 8081 type: ClusterIP --- # Source: uma/templates/daemonset_apmia.yaml # DaemonSet Configuration # Agent from Container1 will be reported to |Infrastructure|Agent # Agent from Container2 will be reported to |Application|JMX Agent apiVersion: apps/v1 kind: DaemonSet metadata: name: app-container-monitor namespace: dx-uma labels: tier: monitoring app: cagent spec: # oc adm policy add-scc-to-user privileged -z default selector: matchLabels: app: caagent template: metadata: labels: app: caagent annotations: ca.broadcom.application.name: container-monitoring spec: hostPID: true hostIPC: true hostNetwork: true tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists serviceAccountName: uma initContainers: - name: init-clusterinfo image: caapm/universalmonitoragent:24.10.1.10 command: ['bash', '-c', 'until [ $(curl --connect-timeout 5 --max-time 10 --write-out %{http_code} --silent --output /dev/null ${CLUSTERINFO_SERVICE_HOST}:${CLUSTERINFO_SERVICE_PORT_HTTPACTU}/actuator/health) -eq 200 ]; do sleep 2; done'] securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL resources: limits: cpu: 20m memory: 20Mi requests: cpu: 10m memory: 10Mi containers: - name: containerinfo image: caapm/universalmonitoragent:24.10.1.10 imagePullPolicy: Always command: ["/start.sh"] securityContext: 
allowPrivilegeEscalation: true seccompProfile: type: RuntimeDefault privileged: true runAsUser: 0 env: - name: CAAPM_CONTAINERINFO_AGENT_VERSION value: - name: CAAPM_CONTAINERINFO_AGENT_MANAGER_URL valueFrom: configMapKeyRef: key: agentManager.url name: caaiops-config-common - name: CAAPM_CONTAINERINFO_AGENT_MANAGER_CREDENTIAL valueFrom: configMapKeyRef: key: agentManager.credential name: caaiops-config-common - name: CAAPM_CONTAINERINFO_PORT value: "7777" - name: CAAPM_CONTAINERINFO_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName - name: CAAPM_CONTAINERINFO_CLUSTER_NAME valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: CAAPM_CONTAINERINFO_MICROSERVICESAGENT_ENABLED valueFrom: configMapKeyRef: key: microservices_agent_enabled name: caaiops-config-common - name: CAAPM_CONTAINERINFO_MICROSERVICESAGENT_ENABLE_KERNEL_VERSION_CHECK valueFrom: configMapKeyRef: key: microservices_agent_enable_kernel_version_check name: caaiops-config-common - name: CAAPM_CONTAINERINFO_MICROSERVICESAGENT_FILTER_TYPE valueFrom: configMapKeyRef: key: microservices_agent_filterType name: caaiops-config-common - name: CAAPM_CONTAINERINFO_MICROSERVICESAGENT_TRACER_ENABLE_HTTP_TRACING valueFrom: configMapKeyRef: key: microservices_agent_tracer_enableHttpTracer name: caaiops-config-common - name: CAAPM_CONTAINERINFO_MICROSERVICESAGENT_TRACER_ENABLE_OPENSSL_TRACING valueFrom: configMapKeyRef: key: microservices_agent_tracer_enableHttpsTracer name: caaiops-config-common - name: CAAPM_CONTAINERINFO_MICROSERVICESAGENT_TRACER_ENABLE_MYSQL_TRACING valueFrom: configMapKeyRef: key: microservices_agent_tracer_enableMySQLTracer name: caaiops-config-common - name: CAAPM_CONTAINERINFO_MICROSERVICESAGENT_TRACER_ENABLE_APPFLOW_TOPOLOGY valueFrom: configMapKeyRef: key: microservices_agent_tracer_enableAppFlow name: caaiops-config-common - name: CAAPM_CONTAINERINFO_MICROSERVICESAGENT_AGENT_BRIDGE_ENABLED valueFrom: configMapKeyRef: key: 
microservices_agent_enable_agentPerApplication name: caaiops-config-common - name: CAAPM_CONTAINERINFO_LOG_LEVEL value: "INFO" - name: CAAPM_CONTAINERINFO_STATS_INTERVAL value: "60" - name: CAAPM_CONTAINERINFO_MUTUAL_TLS value: "true" resources: limits: cpu: 1 memory: 1024Mi requests: cpu: 200m memory: 300Mi volumeMounts: - name: sys-kernel-debug mountPath: /sys/kernel/debug readOnly: true - name: criosock mountPath: /var/run/crio/crio.sock readOnly: true - name: criooverlay mountPath: /host/var/lib/containers/storage/overlay readOnly: true - name: criooverlaycontainers mountPath: /host/var/lib/containers/storage/overlay-containers readOnly: true - name: criooverlayimages mountPath: /host/var/lib/containers/storage/overlay-images readOnly: true - name: dockersock mountPath: /var/run/docker.sock readOnly: true - name: containerdsock mountPath: /run/containerd/containerd.sock readOnly: true - name: containerdekssock mountPath: /run/dockershim.sock readOnly: true - name: dockeroverlay mountPath: /host/var/lib/docker/overlay readOnly: true - name: dockeroverlay2 mountPath: /host/var/lib/docker/overlay2 readOnly: true - name: sys mountPath: /host/sys readOnly: true - name: proc mountPath: /host/proc readOnly: true - name: apm-containerinfo-clientcerts mountPath: /etc/containerinfo/certs readOnly: true - env: - name: agentManager_url_1 valueFrom: configMapKeyRef: key: agentManager.url name: caaiops-config-common - name: agentManager_credential valueFrom: configMapKeyRef: key: agentManager.credential name: caaiops-config-common - name: apmenv_introscope_agent_connection_compatibility_version valueFrom: configMapKeyRef: key: agentManager.version name: caaiops-config-common - name: type value: Openshift - name: interval value: "60" - name: DOCKER_STATS_CGROUP_MODE value: enabled - name: apmenv_introscope_agent_log_destination value: "console, logfile" - name: apmenv_introscope_epagent_config_httpServerPort value: "8889" - name: 
apmenv_introscope_agent_remotejmx_config_modules value: "jvm,memory" - name: apmenv_introscope_agent_extensions_bundles_load value: NodeExtension,PhpExtension,PythonExtension,CppExtension,autoattach,OpenshiftMonitor,HostMonitor,KubernetesRemoteMonitor, UMAAgentController ########################## # Auto Attach properties: ########################## # Free memory threshold required for attaching to containers (%) #- name: apmenv_autoattach_free_memory_threshold # value: "50.00" # Interval for attaching to memory unbounded containers (sec). One unbounded container will be attached to per interval. #- name: apmenv_autoattach_unbounded_container_attach_interval # value: "300" # Operation Mode for Auto Attach (whitelist/blacklist). # whitelist: will only attach to processes marked with env variable CA_APM_MONITORING_ENABLED=true # blacklist: will attach to all processes except those marked with env variable CA_APM_MONITORING_ENABLED=false - name: apmenv_autoattach_filter_type value: "whitelist" - name: SEND_SUSTAINABILITY_EVENTS value: "false" - name: apmenv_autoattach_customJdkTools_enabled value: "false" - name: apmenv_autoattach_dynamicPropertyResolution_enabled value: "true" - name: apmenv_autoattach_dynamicPropertyResolution_properties_list value: "introscope.agent.hostName,com.wily.introscope.agent.agentName,introscope.agent.customProcessName" - name: apmenv_autoattach_introscope_agent_hostName value: "{k8s_deployment_name},{k8s_daemonset_name},{k8s_deploymentconfig_name},{k8s_pod_name},ContainerHost" - name: apmenv_autoattach_com_wily_introscope_agent_agentName value: "" - name: apmenv_autoattach_introscope_agent_customProcessName value: "" - name: apmenv_autoattach_java_enabled_applications value: " " # Properties for aggressively monitoring every JVM - name: apmenv_autoattach_java_proactiveMode_enabled value: "true" - name: apmenv_autoattach_java_proactiveMode_agent_load_delay value: "300" - name: apmenv_autoattach_java_proactiveMode_agent_extra_properties 
value: "introscope.agent.remotejmx.system.s1.config.modules=jvm," - name: apmenv_autoattach_java_agent_sidecar_enabled value: "true" - name: apmenv_autoattach_java_agent_mode value: "default" - name: apmenv_autoattach_net_enabled_applications value: " " # Properties for aggressively monitoring every.NET Runtime - name: apmenv_autoattach_net_proactiveMode_enabled value: "true" #- name: apmenv_autoattach_net_proactiveMode_agent_load_delay # value: "30" # Properties for monitoring Probe AutoAttach - name: apmenv_autoattach_nginx_enabled value: "true" - name: apmenv_probe_autoattach_tls_enabled value: "true" - name: apmenv_autoattach_nodejs_enabled value: "false" # This feature for enabling auto attach php probe into PHP application deployment # apmenv_autoattach_php_enabled - to switch on/off PHP application discovery and attach - name: apmenv_autoattach_php_enabled value: "true" # Properties for trace logging #- name: apmenv_log4j_logger_IntroscopeAgent # value: "TRACE#com.wily.util.feedback.Log4JSeverityLevel, logfile" #- name: apmenv_log4j_logger_IntroscopeAgent_AutoAttach # value: "TRACE#com.wily.util.feedback.Log4JSeverityLevel, logfile" #- name: apmenv_log4j_additivity_IntroscopeAgent_AutoAttach # value: "false" #- name: apmenv_log4j_appender_logfile_MaxFileSize # value: "200MB" #- name: apmenv_log4j_appender_logfile_MaxBackupIndex # value: "10" - name: apmenv_com_ca_apm_kubernetes_remote_monitor_type value: jmx,prometheus - name: apmenv_com_ca_apm_kubernetes_autodiscovery_local value: "true" - name: nodename valueFrom: fieldRef: fieldPath: spec.nodeName - name: pod_ext_type value: "app-container-monitor" - name: apmenv_introscope_agent_hostmonitor_profiles_localhost_linux_groups value: memory,cpu,network,diskio,hostresources,application,protocol,systemmanagement,performance,process,ipc,distributedsystem,topprocess,operatingsystem,storage,systemload,port - name: apmenv_com_ca_apm_kubernetes_prometheus_datafile value: node-exporter, cadvisor - name: 
apmenv_introscope_agent_hostName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_introscope_agent_customProcessName valueFrom: fieldRef: fieldPath: spec.nodeName - name: apmenv_introscope_agent_processName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_introscope_agent_agentName valueFrom: configMapKeyRef: key: agentNaming.daemonset.apmia.agent name: caaiops-config-common - name: apmenv_com_wily_instroscope_agent_metric_calculators_k8spod value: "/usr/local/openshift/apmia/extensions/OpenshiftMonitor/config/calculator/podCalculator.json" - name: MIN_HEAP_VAL_IN_MB value: "64" - name: MAX_HEAP_VAL_IN_MB value: "724" - name: REDUCE_METRIC_EXPLOSION value: "false" - name: apmenv_com_ca_apm_kubernetes_monitor_legacy value: "false" - name: CAAPM_CONTAINERINFO_MUTUALTLS value: "true" - name: apmenv_com_ca_apm_podmonitor_vertex_ttltime_minutes valueFrom: configMapKeyRef: key: monitor.clusterPerformance.dataReporter.vertex.ttl name: caaiops-config-common - name: apmenv_introscope_agent_hostmonitor_vertex_ttl_minutes valueFrom: configMapKeyRef: key: monitor.clusterPerformance.dataReporter.vertex.ttl name: caaiops-config-common - name: apmenv_com_ca_apm_podmonitor_vertex_refreshtime_minutes valueFrom: configMapKeyRef: key: monitor.clusterPerformance.dataReporter.vertex.refreshInterval name: caaiops-config-common - name: apmenv_introscope_agent_hostmonitor_vertex_refreshInterval valueFrom: configMapKeyRef: key: monitor.clusterPerformance.dataReporter.vertex.refreshInterval name: caaiops-config-common - name: CAAPM_CLUSTERINFO_MUTUALTLS value: "true" - name: apmenv_com_ca_apm_acc_extType value: "podmonitor" - name: apmenv_com_ca_apm_acc_namespace value: dx-uma - name: apmenv_com_ca_apm_clusterType value: "Openshift" - name: apmenv_com_ca_apm_clusterName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_com_ca_apm_kubernetes_clusterName valueFrom: 
configMapKeyRef: key: clusterName name: caaiops-config-common - name: UMAAGENT_ACC_CONTROLLER_ENABLED value: "true" name: podmonitor image: caapm/universalmonitoragent:24.10.1.10 resources: limits: cpu: 1 memory: 1024Mi requests: cpu: 200m memory: 300Mi livenessProbe: httpGet: path: /healthz port: 8889 initialDelaySeconds: 120 failureThreshold: 2 periodSeconds: 180 imagePullPolicy: Always # resources: # limits: # cpu: 500m # memory: 700Mi # requests: # cpu: 200m # memory: 300Mi securityContext: allowPrivilegeEscalation: true seccompProfile: type: RuntimeDefault privileged: true capabilities: add: - SYS_RESOURCE drop: - ALL volumeMounts: - name: dev mountPath: /host/dev readOnly: true - name: run mountPath: /host/run readOnly: true - name: sys mountPath: /host/sys readOnly: true - name: boot mountPath: /host/boot readOnly: true - name: home mountPath: /host/home readOnly: true - name: proc mountPath: /host/proc readOnly: true - name: rootfs mountPath: /host readOnly: true - name: config-volume mountPath: /usr/local/openshift/apmia/extensions/KubernetesRemoteMonitor/config/prometheus - name: apm-prometheus-exporter-certs mountPath: /etc/prometheus/certs readOnly: true - name: apm-probe-autoattach-clientcerts mountPath: /etc/probeautoattach/certs readOnly: true - name: apm-containerinfo-clientcerts mountPath: /etc/containerinfo/certs readOnly: true - name: apm-podmonitor-certs mountPath: /etc/clusterinfo/certs readOnly: true volumes: - name: sys-kernel-debug hostPath: path: /sys/kernel/debug - name: criosock hostPath: path: /var/run/crio/crio.sock - name: criooverlaycontainers hostPath: path: /var/lib/containers/storage/overlay-containers - name: criooverlayimages hostPath: path: /var/lib/containers/storage/overlay-images - name: criooverlay hostPath: path: /var/lib/containers/storage/overlay - name: dockersock hostPath: path: /var/run/docker.sock - name: containerdsock hostPath: path: /run/containerd/containerd.sock - name: containerdekssock hostPath: path: 
/run/dockershim.sock - name: dockeroverlay hostPath: path: /var/lib/docker/overlay - name: dockeroverlay2 hostPath: path: /var/lib/docker/overlay2 - name: dev hostPath: path: /dev - name: run hostPath: path: /run - name: sys hostPath: path: /sys - name: boot hostPath: path: /boot - name: home hostPath: path: /home - name: rootfs hostPath: path: / - name: proc hostPath: path: /proc - name: config-volume configMap: name: caaiops-config-prometheus - name: apm-prometheus-exporter-certs secret: secretName: caaiops-uma-certs - name: apm-probe-autoattach-clientcerts secret: secretName: caaiops-probe-autoattach-certs - name: apm-containerinfo-clientcerts secret: secretName: caaiops-uma-certs - name: apm-podmonitor-certs secret: secretName: caaiops-uma-certs --- # Source: uma/templates/deployment_acc.yaml # Cluster Discovery Service apiVersion: apps/v1 kind: Deployment metadata: name: acc-controller namespace: dx-uma labels: app: cagent module: acc-controller spec: replicas: 1 template: metadata: name: acc-controller labels: app: cagent module: acc-controller spec: tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists serviceAccountName: uma initContainers: - name: init-clusterinfo image: caapm/universalmonitoragent:24.10.1.10 command: [ 'bash', '-c', 'until [ $(curl --connect-timeout 5 --max-time 10 --write-out %{http_code} --silent --output /dev/null ${CLUSTERINFO_SERVICE_HOST}:${CLUSTERINFO_SERVICE_PORT_HTTPACTU}/actuator/health) -eq 200 ]; do sleep 2; done' ] resources: limits: cpu: 20m memory: 20Mi requests: cpu: 10m memory: 10Mi containers: - name: acc-controller env: - name: skip.ver.check value: "false" - name: SPRING_PROFILES_ACTIVE value: "grpc" # - name: GRPC.SERVER.SECURITY.ENABLED # value: "true" # - name: GRPC.SERVER.SECURITY.CERTIFICATECHAIN # value: "file:/etc/clusterinfo/certs/cert.pem" # - name: 
GRPC.SERVER.SECURITY.PRIVATEKEY # value: "file:/etc/clusterinfo/certs/key.pem" image: caapm/universalmonitoragent:24.10.1.10 resources: limits: cpu: 1 memory: 1024Mi requests: cpu: 200m memory: 300Mi imagePullPolicy: Always volumeMounts: - name: apm-acc-clientcerts mountPath: /etc/clusterinfo/certs readOnly: true command: ["/bin/bash", "-c", "/usr/local/openshift/apmia/runacc.sh"] restartPolicy: Always volumes: - name: apm-acc-clientcerts secret: secretName: caaiops-uma-certs selector: matchLabels: module: acc-controller --- # Source: uma/templates/deployment_apmia.yaml # ( and .Values.monitor.container.prometheus.backend.endPoint.url .Values.agentManager.url ) # APMIA based Deployment. # Agent from Container1 will be reported to |ClusterMonitoring|Infrastructure Agent apiVersion: apps/v1 kind: Deployment metadata: name: container-monitor namespace: dx-uma labels: app: apmia-deployment spec: replicas: 1 template: metadata: name: apmia-deployment labels: app: apmia-deployment deployment.name: container-monitor spec: tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists serviceAccountName: uma initContainers: - name: init-clusterinfo image: caapm/universalmonitoragent:24.10.1.10 command: ['bash', '-c', 'until [ $(curl --connect-timeout 5 --max-time 10 --write-out %{http_code} --silent --output /dev/null ${CLUSTERINFO_SERVICE_HOST}:${CLUSTERINFO_SERVICE_PORT_HTTPACTU}/actuator/health) -eq 200 ]; do sleep 2; done'] resources: limits: cpu: 20m memory: 20Mi requests: cpu: 10m memory: 10Mi securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL containers: - env: - name: agentManager_url_1 valueFrom: configMapKeyRef: key: agentManager.url name: caaiops-config-common - name: agentManager_credential valueFrom: configMapKeyRef: key: agentManager.credential name: 
caaiops-config-common - name: apmenv_introscope_agent_connection_compatibility_version valueFrom: configMapKeyRef: key: agentManager.version name: caaiops-config-common - name: apmenv_introscope_agent_nass_metric_ingestion_enabled valueFrom: configMapKeyRef: key: agentNaming_dataReporter_nassClientEnabled name: caaiops-config-common - name: interval value: "300" - name: KUEBRNETES_CLUSTER_MONITORING value: "true" - name: apmenv_introscope_epagent_config_httpServerPort value: "8888" - name: REDUCE_METRIC_EXPLOSION value: "false" - name: type value: "Openshift" - name: HostMonitoring value: disabled - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_cluster value: k8s_cluster_name=(k8s_cluster_clustername), k8s_project=(k8s_cluster_projects) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_node value: k8s_project=(k8s_node_namespaces),k8s_cluster_name=(k8s_node_clustername), k8s_pod_nodename=(name), k8s_node_agentpath=(agent) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_namespace value: k8s_project=(name), k8s_cluster_name=(k8s_namespace_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_pod value: k8s_pod_name=(name), k8s_project=(k8s_pod_namespace), k8s_cluster_name=(k8s_pod_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_deployment value: k8s_project=(k8s_deployment_namespace), k8s_cluster_name=(k8s_deployment_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_deploymentconfig value: k8s_project=(k8s_deploymentconfig_namespace), k8s_cluster_name=(k8s_deploymentconfig_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_daemonset value: k8s_project=(k8s_daemonset_namespace), k8s_cluster_name=(k8s_daemonset_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_container value: k8s_pod_nodename=(k8s_container_nodename), k8s_pod_container_name=(name), 
k8s_pod_name=(k8s_container_podname),k8s_project=(k8s_container_namespace),k8s_pod_container_id=(k8s_container_id), k8s_cluster_name=(k8s_container_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_replicaset value: k8s_project=(k8s_replicaset_namespace), k8s_cluster_name=(k8s_replicaset_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_service value: k8s_project=(k8s_service_namespace), k8s_cluster_name=(k8s_service_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_statefulset value: k8s_project=(k8s_statefulset_namespace), k8s_cluster_name=(k8s_statefulset_clustername) - name: apmenv_com_ca_apm_clusterdatareporter_additional_attributes_cronjob value: k8s_project=(k8s_cronjob_namespace), k8s_cluster_name=(k8s_cronjob_clustername) - name: apmenv_cluster_name valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_introscope_agent_customProcessName valueFrom: configMapKeyRef: key: agentNaming.deployment.apmia.process name: caaiops-config-common - name: apmenv_introscope_agent_hostName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_introscope_agent_metricClamp value: "100000" - name: apmenv_introscope_agent_agentName valueFrom: configMapKeyRef: key: agentNaming.deployment.apmia.agent name: caaiops-config-common - name: apmenv_introscope_agent_extensions_bundles_load value: ZipkinTracing, OpenTracing, ClusterDataReporter, UMAAgentController - name: apmenv_introscope_agent_log_destination value: "console, logfile" - name: MIN_HEAP_VAL_IN_MB value: "64" - name: MAX_HEAP_VAL_IN_MB value: "1700" - name: METADATA_GRAPH_BUFFER_SIZE value: "100" - name: clusterName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_agent_distributedTracing_collector_zipkin_server_hostport valueFrom: configMapKeyRef: key: monitor.application.zipkinTracing.zipkinServer.hostport name: 
caaiops-config-common - name: apmenv_introscope_agent_opentracing_grpc_server_hostport valueFrom: configMapKeyRef: key: monitor.application.opentracing.grpc.hostport name: caaiops-config-common - name: apmenv_com_ca_apm_clusterdatareporter_vertex_ttltime_minutes valueFrom: configMapKeyRef: key: monitor.clusterPerformance.dataReporter.vertex.ttl name: caaiops-config-common - name: apmenv_com_ca_apm_clusterdatareporter_vertex_refreshtime_minutes valueFrom: configMapKeyRef: key: monitor.clusterPerformance.dataReporter.vertex.refreshInterval name: caaiops-config-common - name: CAAPM_CLUSTERINFO_MUTUALTLS value: "true" - name: apmenv_com_ca_apm_acc_extType value: "clusterdatareporter" - name: apmenv_com_ca_apm_acc_namespace value: dx-uma - name: apmenv_com_ca_apm_clusterType value: "Openshift" - name: apmenv_com_ca_apm_clusterName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common name: uma image: caapm/universalmonitoragent:24.10.1.10 livenessProbe: httpGet: path: /healthz port: 8888 initialDelaySeconds: 60 periodSeconds: 60 resources: limits: cpu: 2 memory: 2000Mi requests: cpu: 200m memory: 300Mi imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL volumeMounts: - name: container-monitor-certs mountPath: /etc/clusterinfo/certs readOnly: true volumes: - name: container-monitor-certs secret: secretName: caaiops-uma-certs restartPolicy: Always selector: matchLabels: app: apmia-deployment --- # Source: uma/templates/deployment_clusterdiscovery.yaml # Cluster Discovery Service apiVersion: apps/v1 kind: Deployment metadata: name: clusterinfo namespace: dx-uma labels: app: cagent module: clusterinfo annotations: ca.broadcom.application.name: kubernete-cluster-monitoring spec: replicas: 1 template: metadata: name: clusterinfo labels: app: cagent module: clusterinfo spec: tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: 
node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists serviceAccountName: uma containers: - name: clusterinfo image: caapm/universalmonitoragent:24.10.1.10 env: #- name: CAAPM_CLUSTERINFO_DATA_WATCHERS_TIMEOUT # value: "240" - name: type value: Openshift - name: agent_connection_compatibility_version valueFrom: configMapKeyRef: key: agentManager.version name: caaiops-config-common - name: cluster_name valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common # When enabled, Kubernetes/Openshift events are published to DX SaaS. - name: clusterinfo_events_enabled valueFrom: configMapKeyRef: key: monitor.events.enabled name: caaiops-config-common # When enabled, Kubernetes/Openshift events are used to calculate the liveness and # readiness probe status metrics. - name: clusterinfo_events_metricsEnabled valueFrom: configMapKeyRef: key: cluster_events_metricsEnabled name: caaiops-config-common - name: clusterinfo_events_fieldSelector valueFrom: configMapKeyRef: key: monitor.events.filter.fieldselector name: caaiops-config-common - name: clusterinfo_events_namespaceFilter valueFrom: configMapKeyRef: key: monitor.events.filter.namespace name: caaiops-config-common - name: clusterinfo_events_tenantToken valueFrom: configMapKeyRef: key: agentManager.credential name: caaiops-config-common - name: clusterinfo_events_tenantIngestionUrl valueFrom: configMapKeyRef: key: agentManager.url name: caaiops-config-common - name: clusterinfo_events_tenantId valueFrom: configMapKeyRef: key: agentManager.tenantID name: caaiops-config-common - name: clusterinfo_events_elasticDocTypeId valueFrom: configMapKeyRef: key: monitor.events.elasticDocTypeId name: caaiops-config-common - name: clusterinfo_events_elasticDocTypeVersion valueFrom: configMapKeyRef: key: monitor.events.elasticDocTypeVersion name: caaiops-config-common - name: clusterinfo_events_elasticProduct valueFrom: configMapKeyRef: key: monitor.events.elasticProduct 
name: caaiops-config-common - name: clusterinfo_events_perDayClamp valueFrom: configMapKeyRef: key: monitor.events.clamp.perDayLimit name: caaiops-config-common - name: monitor.clusterPerformance.node.noscheduleTaint.skipMetricAggregation.enabled value: "true" - name: SERVER_PORT value: "8443" - name: SERVER_SSL_ENABLED value: "true" - name: SERVER.SSL.CERTIFICATE value: "/etc/clusterinfo/certs/cert.pem" - name: SERVER.SSL.CERTIFICATE-PRIVATE-KEY value: "/etc/clusterinfo/certs/key.pem" - name: SERVER.SSL.TRUST-CERTIFICATE value: "/etc/clusterinfo/certs/ca-cert.pem" - name: MAXHEAP value: 3000Mi - name: MINHEAP value: "1024Mi" - name: SERVER.SSL.CLIENT-AUTH value: "NEED" - name: SERVER.HTTP.ENABLED value: "true" - name: clusterinfo_deployment_role value: common - name: clusterinfo_pod_namespace valueFrom: fieldRef: fieldPath: metadata.namespace - name: clusterinfo_jvm_tuning_args value: "-XX:NewRatio=1 -XX:MaxTenuringThreshold=16 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:NativeMemoryTracking=summary -XX:MaxGCPauseMillis=500 -XX:+ParallelRefProcEnabled -XX:+UseStringDeduplication -XX:ParallelGCThreads=8 -XX:ConcGCThreads=2" - name: clusterinfo_jvm_gc_logs_args value: "" resources: limits: cpu: 2 memory: 3000Mi requests: cpu: 200m memory: 800Mi imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL volumeMounts: - name: apm-clusterinfo-clientcerts mountPath: /etc/clusterinfo/certs readOnly: true livenessProbe: httpGet: path: /up port: 8080 initialDelaySeconds: 60 periodSeconds: 120 command: ["/bin/bash", "-c", "/usr/local/openshift/apmia/runclusterinfo.sh"] args: [ "-Dlogging.config=file:/usr/local/openshift/logback_debug.xml" ] volumes: - name: apm-clusterinfo-clientcerts secret: secretName: caaiops-uma-certs restartPolicy: Always selector: matchLabels: module: clusterinfo --- # Source: uma/templates/deployment_httpcollector.yaml # APMIA HTTP Collector Deployment. 
apiVersion: apps/v1 kind: Deployment metadata: name: apmia-http-collector namespace: dx-uma labels: app: apmia-http-collector spec: replicas: 1 selector: matchLabels: app: apmia-http-collector template: metadata: name: apmia-http-collector labels: app: apmia-http-collector deployment.name: http-collector-agent spec: tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists serviceAccountName: uma containers: - env: - name: agentManager_url_1 valueFrom: configMapKeyRef: key: agentManager.url name: caaiops-config-common - name: agentManager_credential valueFrom: configMapKeyRef: key: agentManager.credential name: caaiops-config-common - name: apmenv_introscope_agent_agentName valueFrom: fieldRef: fieldPath: metadata.name - name: apmenv_introscope_agent_hostName valueFrom: configMapKeyRef: key: agentNaming.deployment.httpCollector.host name: caaiops-config-common - name: apmenv_introscope_agent_customProcessName valueFrom: configMapKeyRef: key: agentNaming.deployment.httpCollector.process name: caaiops-config-common - name: apmenv_introscope_agent_extensions_bundles_load value: "http-collector, NodeExtension," - name: apmenv_introscope_agent_urlgroup_frontend_url_clamp value: "12" - name: apmenv_introscope_agent_urlgroup_backend_url_clamp value: "12" - name: apmenv_introscope_agent_stalls_transaction_untrack_thresholdseconds value: "90" - name: apmenv_introscope_agent_log_destination value: "console, logfile" - name: apmenv_http_collector_host value: "0.0.0.0" - name: apmenv_http_collector_port value: "8085" - name: apmenv_http_collector_displayName valueFrom: fieldRef: fieldPath: metadata.name - name: apmenv_http_collector_threadPoolSize value: "32" - name: apmenv_http_collector_start_delay value: "5" - name: apmenv_transaction_message_expirySeconds value: "5" - name: 
apmenv_http_collector_distributed_memory_grid_enabled value: "false" - name: MIN_HEAP_VAL_IN_MB value: "200" - name: MAX_HEAP_VAL_IN_MB value: "2900" - name: nodename valueFrom: fieldRef: fieldPath: spec.nodeName - name: HostMonitoring value: "disabled" name: http-collector-agent image: caapm/universalmonitoragent:24.10.1.10 imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL ports: - containerPort: 8085 - containerPort: 5071 livenessProbe: httpGet: path: /apmia/datacollector/healthz port: 8085 initialDelaySeconds: 60 periodSeconds: 60 resources: limits: cpu: 2 memory: 3000Mi requests: cpu: 2 memory: 500Mi --- # Source: uma/templates/deployment_probeautoattach.yaml # Probe Auto Attach apiVersion: apps/v1 kind: Deployment metadata: name: apm-probe-autoattach-monitor namespace: dx-uma labels: app: apm-probe-autoattach spec: replicas: 1 template: metadata: labels: app: apm-probe-autoattach deployment.name: apm-probe-autoattach-monitor spec: tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists serviceAccountName: uma containers: - name: apm-probe-autoattach image: caapm/universalmonitoragent:24.10.1.10 command: - "/apm-probe-autoattach-linux" - "-tlsCertFile=/etc/webhook/certs/cert.pem" - "-tlsKeyFile=/etc/webhook/certs/key.pem" - "2>&1" env: - name: GRIZZLY_COLLECTOR_HOST value: "" - name: GRIZZLY_COLLECTOR_PORT value: "8085" - name: SUSTAINABILITY_METRICS value: "false" - name: CLUSTER_TYPE value: Openshift - name: TLS_ENABLE valueFrom: configMapKeyRef: key: probeTlsEnabled name: caaiops-probe-autoattach-configmap - name: MANAGE_WEBHOOK_CABUNDLE valueFrom: configMapKeyRef: key: manageWebhookCaBundle name: caaiops-probe-autoattach-configmap - name: LOG_LEVEL value: "3" - name: INSTID valueFrom: fieldRef: fieldPath: metadata.name - name: 
APP_NAME value: apm-probe-autoattach volumeMounts: - name: apm-probe-autoattach-certs mountPath: /etc/webhook/certs readOnly: true - name: apm-probe-autoattach-config mountPath: /etc/webhook/config - name: apm-probe-webhook-config mountPath: /etc/webhook/admconfig resources: limits: cpu: 4 memory: 200Mi requests: cpu: 400m memory: 80Mi imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL volumes: - name: apm-probe-autoattach-certs secret: secretName: caaiops-probe-autoattach-certs - name: apm-probe-autoattach-config configMap: name: caaiops-probe-autoattach-configmap - name: apm-probe-webhook-config configMap: name: caaiops-probe-webhook-configmap selector: matchLabels: app: apm-probe-autoattach --- # Source: uma/templates/deployment_prometheus.yaml # Prometheus based Deployment. What is the Agent triplet name ? # Agent from Container1 will be reported to |ClusterMonitoring|Prometheus Agent apiVersion: apps/v1 kind: Deployment metadata: name: cluster-performance-prometheus namespace: dx-uma labels: app: prometheus-deployment spec: replicas: 1 template: metadata: name: prometheus-deployment labels: app: prometheus-deployment spec: tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists serviceAccountName: uma initContainers: - name: init-clusterinfo image: caapm/universalmonitoragent:24.10.1.10 command: ['bash', '-c', 'until [ $(curl --connect-timeout 5 --max-time 10 --write-out %{http_code} --silent --output /dev/null ${CLUSTERINFO_SERVICE_HOST}:${CLUSTERINFO_SERVICE_PORT_HTTPACTU}/actuator/health) -eq 200 ]; do sleep 2; done'] securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL resources: limits: cpu: 20m memory: 20Mi requests: cpu: 10m memory: 10Mi containers: - env: - name: 
agentManager_url_1 valueFrom: configMapKeyRef: key: agentManager.url name: caaiops-config-common - name: agentManager_credential valueFrom: configMapKeyRef: key: agentManager.credential name: caaiops-config-common - name: apmenv_introscope_agent_connection_compatibility_version valueFrom: configMapKeyRef: key: agentManager.version name: caaiops-config-common - name: apmenv_introscope_agent_extensions_bundles_load value: KubernetesRemoteMonitor, PrometheusImporter, UMAAgentController - name: apmenv_com_ca_apm_kubernetes_remote_monitor_type value: prometheus - name: apmenv_com_ca_apm_kubernetes_prometheus_datafile value: haproxy-ose,kube-state-metric,coredns,etcd,apiserver - name: apmenv_introscope_agent_agentName valueFrom: configMapKeyRef: key: agentNaming.deployment.prometheus.agent name: caaiops-config-common - name: apmenv_introscope_agent_customProcessName valueFrom: configMapKeyRef: key: agentNaming.deployment.prometheus.process name: caaiops-config-common - name: apmenv_introscope_agent_hostName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_com_ca_apm_acc_extType value: "prometheus extension" - name: apmenv_com_ca_apm_acc_namespace value: dx-uma - name: apmenv_com_ca_apm_clusterType value: "Openshift" - name: apmenv_com_ca_apm_clusterName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: nodename valueFrom: fieldRef: fieldPath: spec.nodeName - name: apmenv_com_ca_apm_kubernetes_prometheus_autodiscovery value: "true" - name: apmenv_introscope_agent_log_destination value: "console, logfile" - name: type value: "Openshift" - name: MIN_HEAP_VAL_IN_MB value: "64" - name: MAX_HEAP_VAL_IN_MB value: "724" - name: apmenv_introscope_epagent_config_httpServerPort value: "8888" - name: apmenv_com_wily_instroscope_agent_metric_calculators_k8scluster value: "/usr/local/openshift/apmia/extensions/KubernetesRemoteMonitor/config/calculator/clusterCalculator.json" - name: 
apmenv_com_wily_instroscope_agent_metric_calculators_k8shaproxy value: "/usr/local/openshift/apmia/extensions/KubernetesRemoteMonitor/config/calculator/haproxyCalculator.json" - name: APISERVER_MONITORING_ENABLED value: "true" - name: HostMonitoring value: disabled - name: clusterName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_cluster_name valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoints value: prometheus - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoint_prometheus_url valueFrom: configMapKeyRef: key: monitor.container.prometheus.backend.endPoint.url name: caaiops-config-common - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoint_prometheus_hostnameverifier_enabled valueFrom: configMapKeyRef: key: monitor.container.prometheus.backend.endPoint.hostNameVerifierEnabled name: caaiops-config-common - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoint_prometheus_username valueFrom: configMapKeyRef: key: monitor.container.prometheus.backend.endPoint.username name: caaiops-config-common - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoint_prometheus_password valueFrom: configMapKeyRef: key: monitor.container.prometheus.backend.endPoint.password name: caaiops-config-common - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoint_prometheus_token valueFrom: configMapKeyRef: key: monitor.container.prometheus.backend.endPoint.token name: caaiops-config-common - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoint_prometheus_polling_frequency value: "30" - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoint_prometheus_useSecretKey value: "true" - name: apmenv_com_ca_apm_prometheus_metric_importer_endpoint_prometheus_config_files value: 
"k8s-cluster.yaml,k8s-container.yaml,k8s-pod.yaml,k8s-namespace.yaml,k8s-node.yaml,k8s-istio.yaml,k8s-deployment.yaml,k8s-daemonset.yaml,k8s-replicaset.yaml,k8s-statefulset.yaml" - name: apmenv_prometheus_metric_name_alias value: "container_name=container,pod_name=pod" - name: apmenv_prometheus_metric_name_alias_exception value: k8s-pod-mapdata.yaml,k8s-container-mapdata.yaml - name: CAAPM_CLUSTERINFO_MUTUALTLS value: "true" name: kubernetesservice image: caapm/universalmonitoragent:24.10.1.10 resources: limits: cpu: 2 memory: 1024Mi requests: cpu: 200m memory: 300Mi livenessProbe: httpGet: path: /healthz port: 8888 initialDelaySeconds: 60 periodSeconds: 60 imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL volumeMounts: - name: config-volume mountPath: /usr/local/openshift/apmia/extensions/KubernetesRemoteMonitor/config/prometheus - name: apm-prometheus-exporter-certs mountPath: /etc/clusterinfo/certs readOnly: true - name: custom-promql-config-volume mountPath: /usr/local/openshift/apmia/extensions/PrometheusImporter/config/custom-promql - name: apm-promethues-certs mountPath: /etc/prometheus/certs readOnly: true volumes: - name: config-volume configMap: name: caaiops-config-prometheus - name: apm-prometheus-exporter-certs secret: secretName: caaiops-uma-certs - name: custom-promql-config-volume configMap: name: custom-promql-config optional: true - name: apm-promethues-certs secret: secretName: caaiops-uma-certs selector: matchLabels: app: prometheus-deployment --- # Source: uma/templates/ibmmq_monitor.yaml #IBM MQ monitoring requires username and password with certain authentication #username and password will be shared as secret in k8s environment #for sucessfull monitoring, please create secret for username/password and uncomment mq-secret template below apiVersion: apps/v1 kind: Deployment metadata: name: apm-ibmmqia-monitor namespace: dx-uma labels: tier: monitoring app: 
ibmmqia-monitor spec: selector: matchLabels: app: ibmmqia-monitor template: metadata: labels: app: ibmmqia-monitor annotations: ca.broadcom.application.name: ibmmqia-monitor spec: tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists containers: - name: apm-ibmmqia-monitor image: caapm/universalmonitoragent:24.10.1.10 imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault capabilities: drop: - ALL env: - name: agentManager_url_1 valueFrom: configMapKeyRef: key: agentManager.url name: caaiops-config-common - name: agentManager_credential valueFrom: configMapKeyRef: key: agentManager.credential name: caaiops-config-common - name: apmenv_introscope_agent_connection_compatibility_version valueFrom: configMapKeyRef: key: agentManager.version name: caaiops-config-common - name: nodename valueFrom: fieldRef: fieldPath: spec.nodeName - name: apmenv_introscope_agent_hostName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_introscope_agent_customProcessName valueFrom: configMapKeyRef: key: agentNaming.deployment.apmia.process name: caaiops-config-common - name: apmenv_introscope_agent_agentName value: "ibmmqia Agent" - name: apmenv_introscope_agent_log_destination value: "console, logfile" - name: apmenv_introscope_agent_extensions_bundles_load value: ibm-mq-ia - name: apmenv_com_ca_apm_acc_extType value: "ibmmqia" - name: apmenv_com_ca_apm_acc_namespace value: dx-uma - name: apmenv_com_ca_apm_clusterType value: "Openshift" - name: apmenv_com_ca_apm_clusterName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: MIN_HEAP_VAL_IN_MB value: "64" - name: MAX_HEAP_VAL_IN_MB value: "724" - name: apmenv_introscope_agent_mq_k8s_discovery_localOnly value: "false" - name: CLUSTERINFO_SERVICE_HOST value: 
"clusterinfo.dx-uma.svc.cluster.local" - name: CLUSTERINFO_SERVICE_PORT value: "8443" #uncomment below code to pass username and password as environment parameters to ibmmq and create corresponding secret (mq-secret) in same dx-uma namespace # - name: SECRET_USERNAME # valueFrom: # secretKeyRef: # name: mq-secret # key: username # - name: SECRET_PASSWORD # valueFrom: # secretKeyRef: # name: mq-secret # key: password - name: CAAPM_CLUSTERINFO_MUTUALTLS value: "true" - name: apmenv_introscope_agent_mq_k8s_discovery_namespace value: - name: AGENT_SOURCE value: "UMA" - name: apmenv_introscope_epagent_config_httpServerPort value: "8888" - name: apmenv_introscope_agent_mq_monitor_profiles_default_plain_password value: "true" - name: apmenv_introscope_agent_metricClamp value: "50000" - name: apmenv_introscope_agent_stalemetrics_mq value: "/usr/local/openshift/apmia/extensions/ibm-mq-ia/config/mq_stalemetric_uma.json" resources: limits: cpu: 2 memory: 1024Mi requests: cpu: 200m memory: 300Mi livenessProbe: httpGet: path: /healthz port: 8888 initialDelaySeconds: 120 failureThreshold: 2 periodSeconds: 180 volumeMounts: - name: apm-ibmmqia-monitor-certs mountPath: /etc/mq/certs readOnly: true volumes: - name: apm-ibmmqia-monitor-certs secret: secretName: caaiops-uma-certs --- # Source: uma/templates/kafka_monitor.yaml apiVersion: apps/v1 kind: Deployment metadata: name: apm-kafka-monitor namespace: dx-uma labels: tier: monitoring app: kafka-monitor spec: selector: matchLabels: app: kafka-monitor template: metadata: labels: app: kafka-monitor annotations: ca.broadcom.application.name: kafka-monitor spec: tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master - effect: NoSchedule key: node-role.kubernetes.io/storage - effect: NoSchedule operator: Exists - effect: NoExecute operator: Exists containers: - name: apm-kafka-monitor image: caapm/universalmonitoragent:24.10.1.10 imagePullPolicy: Always securityContext: allowPrivilegeEscalation: false seccompProfile: 
type: RuntimeDefault capabilities: drop: - ALL env: - name: agentManager_url_1 valueFrom: configMapKeyRef: key: agentManager.url name: caaiops-config-common - name: agentManager_credential valueFrom: configMapKeyRef: key: agentManager.credential name: caaiops-config-common - name: apmenv_com_ca_apm_acc_extType value: "kafka" - name: apmenv_com_ca_apm_acc_namespace value: dx-uma - name: apmenv_com_ca_apm_clusterType value: "Openshift" - name: apmenv_com_ca_apm_uma_clusterName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_introscope_agent_connection_compatibility_version valueFrom: configMapKeyRef: key: agentManager.version name: caaiops-config-common - name: nodename valueFrom: fieldRef: fieldPath: spec.nodeName - name: apmenv_introscope_agent_hostName valueFrom: configMapKeyRef: key: clusterName name: caaiops-config-common - name: apmenv_introscope_agent_customProcessName valueFrom: configMapKeyRef: key: agentNaming.deployment.apmia.process name: caaiops-config-common - name: apmenv_introscope_agent_agentName value: "Kafka Agent" - name: apmenv_introscope_agent_log_destination value: "console, logfile" - name: apmenv_introscope_agent_extensions_bundles_load value: KafkaExtension - name: MIN_HEAP_VAL_IN_MB value: "64" - name: MAX_HEAP_VAL_IN_MB value: "724" - name: type value: Openshift - name: interval value: "60" - name: apmenv_introscope_epagent_config_httpServerPort value: "8888" - name: apmenv_introscope_agent_kafka_k8s_broker_discovery_localOnly value: "false" - name: apmenv_introscope_agent_kafka_k8s_broker_discovery_namespace value: - name: apmenv_introscope_agent_kafka_consumergroups_enabled value: "true" - name: apmenv_introscope_agent_kafka_consumergroups_filter value: - name: apmenv_introscope_agent_kafka_broker_jmx_config_modules value: "kafka,jvm,memory" - name: apmenv_introscope_agent_kafka_broker_jmx_include_filter value: "" - name: apmenv_introscope_agent_kafka_broker_jmx_exclude_filter value: "" - name: 
apmenv_introscope_agent_kafka_config_polling_interval_seconds value: "300" - name: AGENT_SOURCE value: "UMA" - name: apmenv_introscope_agent_kafka_atc_enabled value: "true" - name: apmenv_introscope_agent_kafka_atc_graphTtl value: "1800000" - name: CLUSTERINFO_SERVICE_HOST value: "clusterinfo.dx-uma.svc.cluster.local" - name: CLUSTERINFO_SERVICE_PORT value: "8443" - name: apmenv_introscope_agent_metricClamp value: "50000" - name: apmenv_introscope_agent_remotejmx_clamp value: "50000" - name: CAAPM_CLUSTERINFO_MUTUALTLS value: "true" resources: limits: cpu: 1 memory: 1024Mi requests: cpu: 200m memory: 300Mi livenessProbe: httpGet: path: /healthz port: 8888 initialDelaySeconds: 120 failureThreshold: 2 periodSeconds: 180 volumeMounts: - name: apm-kafka-monitor-certs mountPath: /etc/kafka/certs readOnly: true volumes: - name: apm-kafka-monitor-certs secret: secretName: caaiops-uma-certs --- # Source: uma/templates/deployment_collector.yaml # Collector Service --- # Source: uma/templates/ingress_httpcollector.yaml # --- # Create Ingress Resource here if you are using HTTP Collector with Ingress Controller Load Balancer