},
},
{
- alert: 'CephPGUnavilableBlockingIO',
+ alert: 'CephPGUnavailableBlockingIO',
'for': '1m',
expr: '((ceph_health_detail{name="PG_AVAILABILITY"} == 1) - scalar(ceph_health_detail{name="OSD_DOWN"})) == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.3' },
expr: 'ceph_health_detail{name="CEPHADM_FAILED_DAEMON"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.11.1' },
annotations: {
- summary: 'A ceph daemon manged by cephadm is down%(cluster)s' % $.MultiClusterSummary(),
+ summary: 'A ceph daemon managed by cephadm is down%(cluster)s' % $.MultiClusterSummary(),
description: "A daemon managed by cephadm is no longer active. Determine, which daemon is down with 'ceph health detail'. you may start daemons with the 'ceph orch daemon start <daemon_id>'",
},
},
type: "ceph_default"
- alert: "CephDeviceFailurePredictionTooHigh"
annotations:
- description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availabililty. Prevent data integrity issues by adding new OSDs so that data may be relocated."
+ description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany"
summary: "Too many devices are predicted to fail, unable to resolve"
expr: "ceph_health_detail{name=\"DEVICE_HEALTH_TOOMANY\"} == 1"
oid: "1.3.6.1.4.1.50495.1.2.1.7.5"
severity: "critical"
type: "ceph_default"
- - alert: "CephPGUnavilableBlockingIO"
+ - alert: "CephPGUnavailableBlockingIO"
annotations:
description: "Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability"
- alert: "CephadmDaemonFailed"
annotations:
description: "A daemon managed by cephadm is no longer active. Determine, which daemon is down with 'ceph health detail'. you may start daemons with the 'ceph orch daemon start <daemon_id>'"
- summary: "A ceph daemon manged by cephadm is down"
+ summary: "A ceph daemon managed by cephadm is down"
expr: "ceph_health_detail{name=\"CEPHADM_FAILED_DAEMON\"} > 0"
for: "30s"
labels:
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.11.1
exp_annotations:
- summary: A ceph daemon manged by cephadm is down
+ summary: A ceph daemon managed by cephadm is down
description: "A daemon managed by cephadm is no longer active. Determine, which daemon is down with 'ceph health detail'. you may start daemons with the 'ceph orch daemon start <daemon_id>'"
- interval: 1m
input_series:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany
summary: Too many devices are predicted to fail, unable to resolve
- description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availabililty. Prevent data integrity issues by adding new OSDs so that data may be relocated."
+ description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="DEVICE_HEALTH_IN_USE"}'
alert_rule_test:
# PG_AVAILABILITY and OSD_DOWN not firing .. no alert
- eval_time: 1m
- alertname: CephPGUnavilableBlockingIO
+ alertname: CephPGUnavailableBlockingIO
exp_alerts:
# PG_AVAILABILITY firing, but osd_down is active .. no alert
- eval_time: 5m
- alertname: CephPGUnavilableBlockingIO
+ alertname: CephPGUnavailableBlockingIO
exp_alerts:
# PG_AVAILABILITY firing, AND OSD_DOWN is not active...raise the alert
- eval_time: 15m
- alertname: CephPGUnavilableBlockingIO
+ alertname: CephPGUnavailableBlockingIO
exp_alerts:
- exp_labels:
name: "PG_AVAILABILITY"
def step_impl(context, eval_time):
eval_time_without_unit, unit = resolve_time_and_unit(eval_time)
if eval_time_without_unit is None:
- raise ValueError(f'Invalid evalution time: {eval_time}. ' +
+ raise ValueError(f'Invalid evaluation time: {eval_time}. ' +
'A valid time looks like "1m" where you have a number plus a unit')
global_context.promql_expr_test.set_eval_time(eval_time_without_unit, unit)
def replace_grafana_expr_variables(expr: str, variable: str, value: Any) -> str:
""" Replace grafana variables in expression with a value
- It should match the whole word, 'osd' musn't match with the 'osd' prefix in 'osd_hosts'
+ It should match the whole word: 'osd' must not match the 'osd' prefix in 'osd_hosts'
>>> replace_grafana_expr_variables('metric{name~="$osd_hosts|$other|$osd"}', \
'osd', 'replacement')
'metric{name~="$osd_hosts|$other|replacement"}'
self._task_post('/api/block/mirroring/pool/rbd/bootstrap/peer', import_data)
self.assertStatus(400)
- # cannot import "youself" as peer
+ # cannot import "yourself" as peer
import_data['direction'] = 'rx'
self._task_post('/api/block/mirroring/pool/rbd/bootstrap/peer', import_data)
self.assertStatus(400)
'Password must not contain sequential characters.')
self._reset_login_to_admin('test1')
- def test_change_password_contains_repetetive_characters(self):
+ def test_change_password_contains_repetitive_characters(self):
self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
self.login('test1', 'mypassword10#')
self._post('/api/user/test1/change_password', {
dashboard_config = json.load(f)
uid = dashboard_config.get('uid')
# if it's not a grafana dashboard, skip checks
- # Fields in a dasbhoard:
+ # Fields in a dashboard:
# https://grafana.com/docs/grafana/latest/dashboards/json-model/#json-fields
expected_fields = [
'id', 'uid', 'title', 'tags', 'style', 'timezone', 'editable',
raise NotImplementedError
def to_dict(self, key=''):
- # intialize the schema of this container
+ # initialize the schema of this container
ui_schemas = []
control_schema = {
'type': self._property_type(),
"No cephfs with id {0}".format(fs_id))
# Decorate the metadata with some fields that will be
- # indepdendent of whether it's a kernel or userspace
+ # independent of whether it's a kernel or userspace
# client, so that the javascript doesn't have to grok that.
for client in clients:
if "ceph_version" in client['client_metadata']: # pragma: no cover - no complexity
:param hosts: Hostnames to query.
- :param refresh: Ask the Orchestrator to refresh the inventories. Note the this is an
+ :param refresh: Ask the Orchestrator to refresh the inventories. Note that this is an
- asynchronous operation, the updated version of inventories need to
+ asynchronous operation; the updated inventories need to
- be re-qeuried later.
+ be re-queried later.
:return: Returns list of inventory.
:rtype: list
"""
// Check for visibility of modal container
cy.get('.modal-header').should('be.visible');
- // If purgeing a specific pool, selects that pool if given
+ // If purging a specific pool, selects that pool if given
if (pool !== undefined) {
this.selectOption('poolName', pool);
cy.get('#poolName').should('have.class', 'ng-valid'); // check if pool is selected
cy.fixture('orchestrator/services.json').as('services');
});
- it('should not add an exsiting host', function () {
+ it('should not add an existing host', function () {
const hostname = Cypress._.sample(this.hosts).name;
hosts.navigateTo('add');
hosts.add(hostname, true);
| ceph-node-01 |
| ceph-node-02 |
- Scenario: Add exisiting host and verify it failed
+ Scenario: Add existing host and verify it failed
Given I am on the "Add Hosts" section
And I should see a row with "ceph-node-00"
When I click on "Add" button
// Check invalid placement target input
this.selectOwner(BucketsPageHelper.USERS[1]);
- // The drop down error message will not appear unless a valid option is previsously selected.
+ // The drop down error message will not appear unless a valid option is previously selected.
this.selectPlacementTarget('default-placement');
this.selectPlacementTarget('-- Select a placement target --');
cy.get('@nameInputField').click(); // Trigger validation
create(tenant: string, user_id: string, fullname: string, email: string, maxbuckets: string) {
// Enter in user_id
cy.get('#user_id').type(user_id);
- // Show Tenanat
+ // Show Tenant
cy.get('#show_tenant').click({ force: true });
// Enter in tenant
cy.get('#tenant').type(tenant);
notification.getToast().should('not.exist');
notification.open();
notification.getNotifications().should('have.length.gt', 0);
- notification.getClearNotficationsBtn().should('be.visible');
+ notification.getClearNotificationsBtn().should('be.visible');
notification.clearNotifications();
});
});
import { PageHelper } from '../page-helper.po';
export class NotificationSidebarPageHelper extends PageHelper {
- getNotificatinoIcon() {
+ getNotificationIcon() {
return cy.get('cd-notifications a');
}
return this.getSidebar().find('.card.tc_notification');
}
- getClearNotficationsBtn() {
+ getClearNotificationsBtn() {
return this.getSidebar().find('button.btn-block');
}
}
open() {
- this.getNotificatinoIcon().click();
+ this.getNotificationIcon().click();
this.getSidebar().should('be.visible');
}
clearNotifications() {
// It can happen that although notifications are cleared, by the time we check the notifications
// amount, another notification can appear, so we check it more than once (if needed).
- this.getClearNotficationsBtn().click();
+ this.getClearNotificationsBtn().click();
this.getNotifications()
.should('have.length.gte', 0)
.then(($elems) => {
pools = pools.filter(
(pool: any) => this.rbdService.isRBDPool(pool) && pool.type === 'replicated'
);
- const promisses: Observable<any>[] = [];
+ const promises: Observable<any>[] = [];
pools.forEach((pool: any) => {
- promisses.push(this.rbdService.listNamespaces(pool['pool_name']));
+ promises.push(this.rbdService.listNamespaces(pool['pool_name']));
});
- if (promisses.length > 0) {
- forkJoin(promisses).subscribe((data: Array<Array<string>>) => {
+ if (promises.length > 0) {
+ forkJoin(promises).subscribe((data: Array<Array<string>>) => {
const result: any[] = [];
for (let i = 0; i < data.length; i++) {
const namespaces = data[i];
private disableCreateSnapshot(): string | boolean {
const folders = this.selectedDir.path.split('/').slice(1);
- // With deph of 4 or more we have the subvolume files/folders for which we cannot create
+ // With depth of 4 or more we have the subvolume files/folders for which we cannot create
// a snapshot. Somehow, you can create a snapshot of the subvolume but not its files.
if (folders.length >= 4 && folders[0] === 'volumes') {
return $localize`Cannot create snapshots for files/folders in the subvolume ${folders[2]}`;
}
const node = this.getNode(parent);
if (!node) {
- // Node will not be found for new sub sub directories - this is the intended behaviour
+ // Node will not be found for new sub directories - this is the intended behaviour
return;
}
const children = this.getChildren(parent);
expect(spans[2].textContent).toContain('rgw: 1');
});
- it('should test if host facts are tranformed correctly if orch available', () => {
+ it('should test if host facts are transformed correctly if orch available', () => {
const features = [OrchestratorFeature.HOST_FACTS];
const payload = [
{
expect(spans[7].textContent).toBe('N/A');
});
- it('should test if host facts are unavailable if get_fatcs orch feature is not available', () => {
+ it('should test if host facts are unavailable if get_facts orch feature is not available', () => {
const payload = [
{
hostname: 'host_test',
expect(component.getMaxSize()).toBe(3);
});
- it('should return the osd count as minimum if its lower the the rule minimum', () => {
+ it('should return the osd count as minimum if it is lower than the rule minimum', () => {
component.info.osd_count = 0;
formHelper.setValue('crushRule', component.info.crush_rules_replicated[0]);
const control = form.get('crushRule');
});
});
- it('should test if bucket data is tranformed correctly', () => {
+ it('should test if bucket data is transformed correctly', () => {
rgwBucketServiceListSpy.and.returnValue(
of([
{
});
});
- it('should test if rgw-user data is tranformed correctly', () => {
+ it('should test if rgw-user data is transformed correctly', () => {
rgwUserServiceListSpy.and.returnValue(
of([
{
"Local Time is: Thu Oct 24 10:17:06 2019 CEST",
"Firmware Updates (0x16): 3 Slots, no Reset required",
"Optional Admin Commands (0x0017): Security Format Frmw_DL Self_Test",
- "Optional NVM Commands (0x005f): Comp Wr_Unc DS_Mngmt Wr_Zero Sav/Sel_Feat Timestmp",
+ "Optional NVM Commands (0x005f): Comp Wr_Unc DS_Mngmt Wr_Zero Sav/Sel_Feat Timestamp",
"Maximum Data Transfer Size: 512 Pages",
"Warning Comp. Temp. Threshold: 85 Celsius",
"Critical Comp. Temp. Threshold: 85 Celsius",
prop: 'errors_corrected_by_rereads_rewrites',
name: $localize`Errors Corrected by Rereads/Rewrites`
},
- { prop: 'gigabytes_processed', name: $localize`Gigabyes Processed` },
+ { prop: 'gigabytes_processed', name: $localize`Gigabytes Processed` },
{ prop: 'total_errors_corrected', name: $localize`Total Errors Corrected` },
{ prop: 'total_uncorrected_errors', name: $localize`Total Errors Uncorrected` }
];
}
_handleTasks(executingTasks: ExecutingTask[]) {
- for (const excutingTask of executingTasks) {
- excutingTask.description = this.taskMessageService.getRunningTitle(excutingTask);
+ for (const executingTask of executingTasks) {
+ executingTask.description = this.taskMessageService.getRunningTitle(executingTask);
}
this.executingTasks = executingTasks;
}
});
});
- describe('dimmlessBinary validators', () => {
+ describe('dimlessBinary validators', () => {
const i18nMock = (a: string, b: { value: string }) => a.replace('{{value}}', b.value);
beforeEach(() => {
export class ImageSpec {
static fromString(imageSpec: string) {
- const imageSpecSplited = imageSpec.split('/');
+ const imageSpecSplit = imageSpec.split('/');
- const poolName = imageSpecSplited[0];
- const namespace = imageSpecSplited.length >= 3 ? imageSpecSplited[1] : null;
- const imageName = imageSpecSplited.length >= 3 ? imageSpecSplited[2] : imageSpecSplited[1];
+ const poolName = imageSpecSplit[0];
+ const namespace = imageSpecSplit.length >= 3 ? imageSpecSplit[1] : null;
+ const imageName = imageSpecSplit.length >= 3 ? imageSpecSplit[2] : imageSpecSplit[1];
return new this(poolName, namespace, imageName);
}
* source: https://stackoverflow.com/a/34270811
*
* @param {number} seconds The number of seconds to be processed
- * @return {string} The phrase describing the the amount of time
+ * @return {string} The phrase describing the amount of time
*/
transform(seconds: number): string {
if (seconds === null || seconds <= 0) {
--- /dev/null
+import { fromEvent, Observable, partition } from 'rxjs';
+import { repeatWhen, shareReplay, takeUntil } from 'rxjs/operators';
+
+export function whenPageVisible() {
+ const visibilitychange$ = fromEvent(document, 'visibilitychange').pipe(
+ shareReplay({ refCount: true, bufferSize: 1 })
+ );
+
+ const [pageVisible$, pageHidden$] = partition(
+ visibilitychange$,
+ () => document.visibilityState === 'visible'
+ );
+
+ return function <T>(source: Observable<T>) {
+ return source.pipe(
+ takeUntil(pageHidden$),
+ repeatWhen(() => pageVisible$)
+ );
+ };
+}
+++ /dev/null
-import { fromEvent, Observable, partition } from 'rxjs';
-import { repeatWhen, shareReplay, takeUntil } from 'rxjs/operators';
-
-export function whenPageVisible() {
- const visibilitychange$ = fromEvent(document, 'visibilitychange').pipe(
- shareReplay({ refCount: true, bufferSize: 1 })
- );
-
- const [pageVisible$, pageHidden$] = partition(
- visibilitychange$,
- () => document.visibilityState === 'visible'
- );
-
- return function <T>(source: Observable<T>) {
- return source.pipe(
- takeUntil(pageHidden$),
- repeatWhen(() => pageVisible$)
- );
- };
-}
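// Illustrative usage sketch (not part of the change above): the renamed whenPageVisible()
// operator pauses a source stream while the browser tab is hidden and resumes it when the
// tab becomes visible again. The 5 s interval and the fetchStatus() stub below are
// assumptions made for this example only.
import { of, timer } from 'rxjs';
import { switchMap } from 'rxjs/operators';
import { whenPageVisible } from './page-visibility.operator';

// Placeholder for a real HTTP call.
const fetchStatus = () => of('ok');

// Emits every 5 seconds while the page is visible; emission stops when the
// 'visibilitychange' event reports hidden and restarts once the page is visible again.
const status$ = timer(0, 5000).pipe(
  whenPageVisible(),
  switchMap(() => fetchStatus())
);

status$.subscribe((status) => console.log(status));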
import { catchError, delay, mergeMap, repeat, tap } from 'rxjs/operators';
import { Motd, MotdService } from '~/app/shared/api/motd.service';
-import { whenPageVisible } from '../rxjs/operators/page-visibilty.operator';
+import { whenPageVisible } from '../rxjs/operators/page-visibility.operator';
@Injectable({
providedIn: 'root'
import { Observable, timer } from 'rxjs';
import { observeOn, shareReplay, switchMap } from 'rxjs/operators';
-import { whenPageVisible } from '../rxjs/operators/page-visibilty.operator';
+import { whenPageVisible } from '../rxjs/operators/page-visibility.operator';
import { NgZoneSchedulerService } from './ngzone-scheduler.service';
@Injectable({
del config['gateways'][gateway_name]
cls._save_config(config)
except RequestException:
- # If gateway is not acessible, it should be removed manually
+ # If gateway is not accessible, it should be removed manually
# or we will try to update automatically next time
continue
inst = ctrl()
# We need to cache the controller endpoints because
- # BaseController#endpoints method is not idempontent
+ # BaseController#endpoints method is not idempotent
# and a controller might be needed by more than one
# unit test.
if ctrl not in cls._endpoints_cache:
self.available = available
self.path = path
- def create_invetory_host(host, devices_data):
+ def create_inventory_host(host, devices_data):
inventory_host = mock.Mock()
inventory_host.devices.devices = []
for data in devices_data:
for device in devices_data:
hosts.add(device['host'])
- inventory = [create_invetory_host(host, devices_data) for host in hosts]
+ inventory = [create_inventory_host(host, devices_data) for host in hosts]
orch_client_mock.inventory.list.return_value = inventory