__pycache__
.cache
ceph.conf
+wheelhouse
# IDE
.vscode
--- /dev/null
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from collections import defaultdict
+import json
+
+import cherrypy
+from mgr_module import CommandResult
+
+from ..tools import ApiController, AuthRequired, BaseController, ViewCache
+
+
@ApiController('cephfs')
@AuthRequired()
class CephFS(BaseController):
    """REST controller exposing CephFS status, client sessions and MDS
    performance counters to the dashboard frontend."""

    def __init__(self):
        super(CephFS, self).__init__()

        # Stateful instances of CephFSClients, hold cached results. Key to
        # dict is FSCID
        self.cephfs_clients = {}

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def clients(self, fs_id):
        """Return the (view-cached) client session list of one filesystem."""
        fs_id = self.fs_id_to_int(fs_id)

        return self._clients(fs_id)

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def data(self, fs_id):
        """Return a full status snapshot of one filesystem."""
        fs_id = self.fs_id_to_int(fs_id)

        return self.fs_status(fs_id)

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def mds_counters(self, fs_id):
        """
        Result format: map of daemon name to map of counter to list of
        datapoints.

        rtype: dict[str, dict[str, list]]
        """
        # Opinionated list of interesting performance counters for the GUI --
        # if you need something else just add it. See how simple life is
        # when you don't have to write general purpose APIs?
        counters = [
            "mds_server.handle_client_request",
            "mds_log.ev",
            "mds_cache.num_strays",
            "mds.exported",
            "mds.exported_inodes",
            "mds.imported",
            "mds.imported_inodes",
            "mds.inodes",
            "mds.caps",
            "mds.subtrees"
        ]

        fs_id = self.fs_id_to_int(fs_id)

        result = {}
        for mds_name in self._get_mds_names(fs_id):
            result[mds_name] = {}
            for counter in counters:
                data = self.mgr.get_counter("mds", mds_name, counter)
                # A daemon that has not reported this counter yet yields
                # None; normalize to an empty series for the frontend.
                if data is not None:
                    result[mds_name][counter] = data[counter]
                else:
                    result[mds_name][counter] = []

        # ``result`` is already a plain dict; the redundant dict() copy that
        # used to sit here is gone.
        return result

    @staticmethod
    def fs_id_to_int(fs_id):
        """Parse the URL path component into an FSCID, or 400 on junk."""
        try:
            return int(fs_id)
        except ValueError:
            raise cherrypy.HTTPError(400, "Invalid cephfs id {}".format(fs_id))

    def _get_mds_names(self, filesystem_id=None):
        """List MDS daemon names, optionally restricted to one filesystem.

        Standby daemons belong to no filesystem, so they are only included
        when no ``filesystem_id`` filter is given.
        """
        names = []

        fsmap = self.mgr.get("fs_map")
        for fs in fsmap['filesystems']:
            if filesystem_id is not None and fs['id'] != filesystem_id:
                continue
            names.extend(info['name']
                         for info in fs['mdsmap']['info'].values())

        if filesystem_id is None:
            names.extend(info['name'] for info in fsmap['standbys'])

        return names

    def get_rate(self, daemon_type, daemon_name, stat):
        """Return the rate of change (per second) between the two most
        recent datapoints of a counter, or 0 with fewer than two samples."""
        data = self.mgr.get_counter(daemon_type, daemon_name, stat)[stat]

        if data and len(data) > 1:
            tdelta = float(data[-1][0] - data[-2][0])
            # Two samples with an identical timestamp would otherwise raise
            # ZeroDivisionError and 500 the whole endpoint.
            if tdelta != 0:
                return (data[-1][1] - data[-2][1]) / tdelta

        return 0

    # pylint: disable=too-many-locals,too-many-statements,too-many-branches
    def fs_status(self, fs_id):
        """Assemble the ranks/pools/standbys/versions snapshot of one
        filesystem; raises 404 when the FSCID is unknown."""
        mds_versions = defaultdict(list)

        fsmap = self.mgr.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        if filesystem is None:
            raise cherrypy.HTTPError(404,
                                     "CephFS id {0} not found".format(fs_id))

        rank_table = []
        mdsmap = filesystem['mdsmap']
        client_count = 0

        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = self.get_latest("mds", info['name'], "mds.inodes")
                inos = self.get_latest("mds", info['name'], "mds_mem.ino")

                # Prefer rank 0's session count; in case rank 0 was down,
                # any other rank's sessionmap gives an indication of clients
                # (the two original branches ran the identical call).
                if rank == 0 or client_count == 0:
                    client_count = self.get_latest(
                        "mds", info['name'], "mds_sessions.session_count")

                laggy = "laggy_since" in info

                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                activity = ""
                if state == "active":
                    activity = self.get_rate(
                        "mds", info['name'],
                        "mds_server.handle_client_request")

                metadata = self.mgr.get_metadata('mds', info['name'])
                mds_versions[metadata.get('ceph_version', 'unknown')].append(
                    info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos
                    }
                )
            else:
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": "",
                        "dns": 0,
                        "inos": 0
                    }
                )

        # Find the standby replays.  BUGFIX: dict.iteritems() is Python 2
        # only; .values() works under both interpreters (the gid key was
        # unused anyway, which also drops the pylint suppression).
        for daemon_info in mdsmap['info'].values():
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = self.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = self.get_latest("mds", daemon_info['name'], "mds.inodes")

            activity = self.get_rate(
                "mds", daemon_info['name'], "mds_log.replay")

            rank_table.append(
                {
                    "rank": "{0}-s".format(daemon_info['rank']),
                    "state": "standby-replay",
                    "mds": daemon_info['name'],
                    "activity": activity,
                    "dns": dns,
                    "inos": inos
                }
            )

        df = self.mgr.get("df")
        pool_stats = {p['id']: p['stats'] for p in df['pools']}
        osdmap = self.mgr.get("osd_map")
        pools = {p['pool']: p for p in osdmap['pools']}
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['bytes_used'],
                "avail": stats['max_avail']
            })

        standby_table = []
        for standby in fsmap['standbys']:
            metadata = self.mgr.get_metadata('mds', standby['name'])
            mds_versions[metadata.get('ceph_version', 'unknown')].append(
                standby['name'])

            standby_table.append({
                'name': standby['name']
            })

        return {
            "cephfs": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }

    def _clients(self, fs_id):
        """Fetch the cached client list for ``fs_id`` and raise 404 when the
        filesystem (or its rank 0) cannot be queried."""
        cephfs_clients = self.cephfs_clients.get(fs_id, None)
        if cephfs_clients is None:
            cephfs_clients = CephFSClients(self.mgr, fs_id)
            self.cephfs_clients[fs_id] = cephfs_clients

        try:
            status, clients = cephfs_clients.get()
        except AttributeError:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))
        if clients is None:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        # Decorate the metadata with some fields that will be independent
        # of whether it's a kernel or userspace client, so that the
        # javascript doesn't have to grok that.
        for client in clients:
            client_metadata = client['client_metadata']
            if "ceph_version" in client_metadata:
                client['type'] = "userspace"
                client['version'] = client_metadata['ceph_version']
                client['hostname'] = client_metadata['hostname']
            elif "kernel_version" in client_metadata:
                client['type'] = "kernel"
                client['version'] = client_metadata['kernel_version']
                client['hostname'] = client_metadata['hostname']
            else:
                client['type'] = "unknown"
                client['version'] = ""
                client['hostname'] = ""

        return {
            'status': status,
            'data': clients
        }

    def get_latest(self, daemon_type, daemon_name, stat):
        """Return the most recent value of a counter, or 0 when the daemon
        has not reported any datapoints yet."""
        data = self.mgr.get_counter(daemon_type, daemon_name, stat)[stat]
        if data:
            return data[-1][1]
        return 0
+
+
class CephFSClients(object):
    """Fetches (and, via ViewCache, caches) the session list of one
    filesystem's rank-0 MDS."""

    def __init__(self, module_inst, fscid):
        self._module = module_inst
        self.fscid = fscid

    # pylint: disable=unused-variable
    @ViewCache()
    def get(self):
        """Issue ``session ls`` to rank 0 and return the decoded reply."""
        command = json.dumps({
            "prefix": "session ls",
        })
        completion = CommandResult("")
        self._module.send_command(completion, "mds",
                                  "{0}:0".format(self.fscid), command, "")
        ret, out_buf, out_str = completion.wait()
        # TODO handle nonzero returns, e.g. when rank isn't active
        assert ret == 0
        return json.loads(out_buf)
import { RouterModule, Routes } from '@angular/router';
import { PoolDetailComponent } from './ceph/block/pool-detail/pool-detail.component';
+import { CephfsComponent } from './ceph/cephfs/cephfs/cephfs.component';
+import { ClientsComponent } from './ceph/cephfs/clients/clients.component';
import { HostsComponent } from './ceph/cluster/hosts/hosts.component';
import { MonitorComponent } from './ceph/cluster/monitor/monitor.component';
import { DashboardComponent } from './ceph/dashboard/dashboard/dashboard.component';
canActivate: [AuthGuardService]
},
{ path: 'monitor', component: MonitorComponent, canActivate: [AuthGuardService] },
+ { path: 'cephfs/:id/clients', component: ClientsComponent, canActivate: [AuthGuardService] },
+ { path: 'cephfs/:id', component: CephfsComponent, canActivate: [AuthGuardService] },
{ path: '404', component: NotFoundComponent },
{ path: '**', redirectTo: '/404'}
];
import { NgModule } from '@angular/core';
import { BlockModule } from './block/block.module';
+import { CephfsModule } from './cephfs/cephfs.module';
import { ClusterModule } from './cluster/cluster.module';
import { DashboardModule } from './dashboard/dashboard.module';
import { RgwModule } from './rgw/rgw.module';
ClusterModule,
DashboardModule,
RgwModule,
- BlockModule
+ BlockModule,
+ CephfsModule
],
declarations: []
})
--- /dev/null
+import { CommonModule } from '@angular/common';
+import { NgModule } from '@angular/core';
+
+import { ChartsModule } from 'ng2-charts/ng2-charts';
+import { ProgressbarModule } from 'ngx-bootstrap/progressbar';
+
+import { AppRoutingModule } from '../../app-routing.module';
+import { SharedModule } from '../../shared/shared.module';
+import { CephfsService } from './cephfs.service';
+import { CephfsComponent } from './cephfs/cephfs.component';
+import { ClientsComponent } from './clients/clients.component';
+
// Feature module bundling the CephFS detail and client-list pages together
// with the service they share.  ChartsModule powers the per-MDS performance
// graphs; ProgressbarModule renders the pool usage bars.
@NgModule({
  imports: [
    CommonModule,
    SharedModule,
    AppRoutingModule,
    ChartsModule,
    ProgressbarModule.forRoot()
  ],
  declarations: [CephfsComponent, ClientsComponent],
  providers: [CephfsService]
})
export class CephfsModule {}
--- /dev/null
+import { HttpClientModule } from '@angular/common/http';
+import { inject, TestBed } from '@angular/core/testing';
+
+import { CephfsService } from './cephfs.service';
+
// Creation smoke test: CephfsService only depends on HttpClient, so the
// real HttpClientModule is enough to construct it (no requests are fired).
describe('CephfsService', () => {
  beforeEach(() => {
    TestBed.configureTestingModule({
      imports: [HttpClientModule],
      providers: [CephfsService]
    });
  });

  it(
    'should be created',
    // inject() resolves the service instance from the testing injector.
    inject([CephfsService], (service: CephfsService) => {
      expect(service).toBeTruthy();
    })
  );
});
--- /dev/null
+import { HttpClient } from '@angular/common/http';
+import { Injectable } from '@angular/core';
+
// Thin HTTP wrapper around the dashboard backend's CephFS REST controller.
@Injectable()
export class CephfsService {
  baseURL = '/api/cephfs';

  constructor(private http: HttpClient) {}

  // Full status snapshot (ranks, pools, standbys, versions) of filesystem `id`.
  getCephfs(id) {
    return this.http.get(`${this.baseURL}/data/${id}`);
  }

  // Client sessions of filesystem `id`; the response also carries the
  // backend view-cache status consumed by ClientsComponent.
  getClients(id) {
    return this.http.get(`${this.baseURL}/clients/${id}`);
  }

  // Per-MDS performance counter time series for filesystem `id`.
  getMdsCounters(id) {
    return this.http.get(`${this.baseURL}/mds_counters/${id}`);
  }
}
--- /dev/null
<nav aria-label="breadcrumb">
  <ol class="breadcrumb">
    <li class="breadcrumb-item">Filesystem</li>
    <li class="breadcrumb-item active"
        aria-current="page">{{ name }}</li>
  </ol>
</nav>

<div class="row">
  <div class="col-md-12">
    <i class="fa fa-desktop"></i>
    <a [routerLink]="['/cephfs/' + id + '/clients']">
      <span style="font-weight:bold;">{{ clientCount }}</span>
      Clients
    </a>
  </div>
</div>

<div class="row">
  <div class="col-sm-6">
    <fieldset>
      <legend>Ranks</legend>

      <cd-table [data]="ranks.data"
                [columns]="ranks.columns"
                toolHeader="false">
      </cd-table>
    </fieldset>

    <cd-table-key-value [data]="standbys">
    </cd-table-key-value>
  </div>

  <div class="col-sm-6">
    <fieldset>
      <legend>Pools</legend>

      <cd-table [data]="pools.data"
                [columns]="pools.columns"
                toolHeader="false">
      </cd-table>

    </fieldset>
  </div>
</div>

<!-- One chart row per MDS daemon. -->
<div class="row"
     *ngFor="let mdsCounter of objectValues(mdsCounters)">
  <!-- BUGFIX: was "cold-md-12", a typo for the bootstrap grid class. -->
  <div class="col-md-12">
    <div class="chart-container">
      <canvas baseChart
              [datasets]="mdsCounter.datasets"
              [options]="mdsCounter.options"
              [chartType]="mdsCounter.chartType">
      </canvas>
    </div>
  </div>
</div>

<!-- templates -->
<ng-template #poolProgressTmpl
             let-row="row">
  <progressbar type="danger"
               [value]="row.used * 100.0 / row.avail">
  </progressbar>
</ng-template>

<ng-template #activityTmpl
             let-row="row"
             let-value="value">
  {{ row.state === 'standby-replay' ? 'Evts' : 'Reqs' }}: {{ value | dimless }} /s
</ng-template>
--- /dev/null
// Fixed-height wrapper so chart.js (maintainAspectRatio: false in the
// component's chart options) has a box to fill.
.chart-container {
  position: relative;
  margin: auto;
  height: 500px;
  width: 100%;
}

// The pool usage bars sit inside table cells; drop Bootstrap's default
// bottom gap so rows stay compact.
.progress {
  margin-bottom: 0px;
}
--- /dev/null
+import { async, ComponentFixture, TestBed } from '@angular/core/testing';
+import { RouterTestingModule } from '@angular/router/testing';
+
+import { ChartsModule } from 'ng2-charts/ng2-charts';
+import { ProgressbarModule } from 'ngx-bootstrap/progressbar';
+import { Observable } from 'rxjs/Observable';
+
+import { SharedModule } from '../../../shared/shared.module';
+import { CephfsService } from '../cephfs.service';
+import { CephfsComponent } from './cephfs.component';
+
describe('CephfsComponent', () => {
  let component: CephfsComponent;
  let fixture: ComponentFixture<CephfsComponent>;

  // Stub of CephfsService: returns Observables that never emit, so the
  // component can subscribe during ngOnInit without real HTTP traffic.
  const fakeFilesystemService = {
    getCephfs: id => {
      return Observable.create(observer => {
        return () => console.log('disposed');
      });
    },
    getMdsCounters: id => {
      return Observable.create(observer => {
        return () => console.log('disposed');
      });
    }
  };

  beforeEach(
    async(() => {
      TestBed.configureTestingModule({
        imports: [SharedModule, ChartsModule, RouterTestingModule, ProgressbarModule.forRoot()],
        declarations: [CephfsComponent],
        providers: [
          { provide: CephfsService, useValue: fakeFilesystemService }
        ]
      }).compileComponents();
    })
  );

  beforeEach(() => {
    fixture = TestBed.createComponent(CephfsComponent);
    component = fixture.componentInstance;
    // Triggers ngOnInit against the stubbed service.
    fixture.detectChanges();
  });

  it('should create', () => {
    expect(component).toBeTruthy();
  });
});
--- /dev/null
+import { Component, OnDestroy, OnInit, TemplateRef, ViewChild } from '@angular/core';
+import { ActivatedRoute } from '@angular/router';
+
+import * as _ from 'lodash';
+
+import { ViewCacheStatus } from '../../../shared/enum/view-cache-status.enum';
+import { DimlessBinaryPipe } from '../../../shared/pipes/dimless-binary.pipe';
+import { DimlessPipe } from '../../../shared/pipes/dimless.pipe';
+import { CephfsService } from '../cephfs.service';
+
+@Component({
+ selector: 'cd-cephfs',
+ templateUrl: './cephfs.component.html',
+ styleUrls: ['./cephfs.component.scss']
+})
+export class CephfsComponent implements OnInit, OnDestroy {
+ @ViewChild('poolProgressTmpl') poolProgressTmpl: TemplateRef<any>;
+ @ViewChild('activityTmpl') activityTmpl: TemplateRef<any>;
+
+ routeParamsSubscribe: any;
+
+ objectValues = Object.values;
+
+ single: any[];
+ multi: any[];
+
+ view: any[] = [700, 400];
+
+ id: number;
+ name: string;
+ ranks: any;
+ pools: any;
+ standbys = [];
+ clientCount: number;
+
+ mdsCounters = {};
+
+ lhsCounter = 'mds.inodes';
+ rhsCounter = 'mds_server.handle_client_request';
+ charts = {};
+ interval: any;
+
+ constructor(
+ private route: ActivatedRoute,
+ private cephfsService: CephfsService,
+ private dimlessBinary: DimlessBinaryPipe,
+ private dimless: DimlessPipe
+ ) {}
+
+ ngOnInit() {
+ this.ranks = {
+ columns: [
+ { prop: 'rank' },
+ { prop: 'state' },
+ { prop: 'mds', name: 'Daemon' },
+ { prop: 'activity', cellTemplate: this.activityTmpl },
+ { prop: 'dns', name: 'Dentries', pipe: this.dimless },
+ { prop: 'inos', name: 'Inodes', pipe: this.dimless }
+ ],
+ data: []
+ };
+
+ this.pools = {
+ columns: [
+ { prop: 'pool' },
+ { prop: 'type' },
+ { prop: 'used', pipe: this.dimlessBinary },
+ { prop: 'avail', pipe: this.dimlessBinary },
+ {
+ name: 'Usage',
+ cellTemplate: this.poolProgressTmpl,
+ comparator: (valueA, valueB, rowA, rowB, sortDirection) => {
+ const valA = rowA.used / rowA.avail;
+ const valB = rowB.used / rowB.avail;
+
+ if (valA === valB) {
+ return 0;
+ }
+
+ if (valA > valB) {
+ return 1;
+ } else {
+ return -1;
+ }
+ }
+ }
+ ],
+ data: []
+ };
+
+ this.routeParamsSubscribe = this.route.params.subscribe((params: { id: number }) => {
+ this.id = params.id;
+
+ this.ranks.data = [];
+ this.pools.data = [];
+ this.standbys = [];
+ this.mdsCounters = {};
+
+ this.refresh();
+ this.draw_chart();
+ });
+
+ this.interval = setInterval(() => {
+ this.refresh();
+ this.draw_chart();
+ }, 5000);
+ }
+
+ ngOnDestroy() {
+ clearInterval(this.interval);
+ this.routeParamsSubscribe.unsubscribe();
+ }
+
+ refresh() {
+ this.cephfsService.getCephfs(this.id).subscribe((data: any) => {
+ this.ranks.data = data.cephfs.ranks;
+ this.pools.data = data.cephfs.pools;
+ this.standbys = [
+ {
+ key: 'Standby daemons',
+ value: data.standbys.map(value => value.name).join(', ')
+ }
+ ];
+ this.name = data.cephfs.name;
+ this.clientCount = data.cephfs.client_count;
+ });
+ }
+
+ draw_chart() {
+ this.cephfsService.getMdsCounters(this.id).subscribe(data => {
+ const topChart = true;
+
+ const oldKeys = Object.keys(this.mdsCounters);
+ const newKeys = Object.keys(data);
+
+ _.each(this.mdsCounters, (value, key) => {
+ if (data[key] === undefined) {
+ delete this.mdsCounters[key];
+ }
+ });
+
+ _.each(data, (mdsData, mdsName) => {
+ const lhsData = this.convert_timeseries(mdsData[this.lhsCounter]);
+ const rhsData = this.delta_timeseries(mdsData[this.rhsCounter]);
+
+ if (this.mdsCounters[mdsName] === undefined) {
+ const elem = {
+ datasets: [
+ {
+ label: this.lhsCounter,
+ yAxisID: 'LHS',
+ data: lhsData,
+ tension: 0.1
+ },
+ {
+ label: this.rhsCounter,
+ yAxisID: 'RHS',
+ data: rhsData,
+ tension: 0.1
+ }
+ ],
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ legend: {
+ position: 'top',
+ display: topChart
+ },
+ scales: {
+ xAxes: [
+ {
+ position: 'top',
+ type: 'time',
+ display: topChart,
+ time: {
+ displayFormats: {
+ quarter: 'MMM YYYY'
+ }
+ }
+ }
+ ],
+ yAxes: [
+ {
+ id: 'LHS',
+ type: 'linear',
+ position: 'left',
+ min: 0
+ },
+ {
+ id: 'RHS',
+ type: 'linear',
+ position: 'right',
+ min: 0
+ }
+ ]
+ }
+ },
+ chartType: 'line'
+ };
+
+ this.mdsCounters[mdsName] = elem;
+ } else {
+ this.mdsCounters[mdsName].datasets[0].data = lhsData;
+ this.mdsCounters[mdsName].datasets[1].data = rhsData;
+ }
+ });
+ });
+ }
+
+ // Convert ceph-mgr's time series format (list of 2-tuples
+ // with seconds-since-epoch timestamps) into what chart.js
+ // can handle (list of objects with millisecs-since-epoch
+ // timestamps)
+ convert_timeseries(sourceSeries) {
+ const data = [];
+ _.each(sourceSeries, dp => {
+ data.push({
+ x: dp[0] * 1000,
+ y: dp[1]
+ });
+ });
+
+ return data;
+ }
+
+ delta_timeseries(sourceSeries) {
+ let i;
+ let prev = sourceSeries[0];
+ const result = [];
+ for (i = 1; i < sourceSeries.length; i++) {
+ const cur = sourceSeries[i];
+ const tdelta = cur[0] - prev[0];
+ const vdelta = cur[1] - prev[1];
+ const rate = vdelta / tdelta;
+
+ result.push({
+ x: cur[0] * 1000,
+ y: rate
+ });
+
+ prev = cur;
+ }
+ return result;
+ }
+}
--- /dev/null
<!-- Breadcrumb: the filesystem name links back to its detail page. -->
<nav aria-label="breadcrumb">
  <ol class="breadcrumb">
    <li class="breadcrumb-item">Filesystem</li>
    <li class="breadcrumb-item">
      <a [routerLink]="['/cephfs/' + id]">{{ name }}</a>
    </li>
    <li class="breadcrumb-item active"
        aria-current="page">
      Clients
    </li>
  </ol>
</nav>

<fieldset>
  <!-- Surfaces whether the client list below is live, stale or absent. -->
  <cd-view-cache [status]="viewCacheStatus"></cd-view-cache>

  <cd-table [data]="clients.data"
            [columns]="clients.columns"
            [header]="false">
  </cd-table>
</fieldset>
--- /dev/null
+import { async, ComponentFixture, TestBed } from '@angular/core/testing';
+import { RouterTestingModule } from '@angular/router/testing';
+
+import { Observable } from 'rxjs/Observable';
+
+import { SharedModule } from '../../../shared/shared.module';
+import { CephfsService } from '../cephfs.service';
+import { ClientsComponent } from './clients.component';
+
describe('ClientsComponent', () => {
  let component: ClientsComponent;
  let fixture: ComponentFixture<ClientsComponent>;

  // Stub of CephfsService: Observables that never emit, so ngOnInit can
  // subscribe without real HTTP traffic.
  const fakeFilesystemService = {
    getCephfs: id => {
      return Observable.create(observer => {
        return () => console.log('disposed');
      });
    },
    getClients: id => {
      return Observable.create(observer => {
        return () => console.log('disposed');
      });
    }
  };

  beforeEach(
    async(() => {
      TestBed.configureTestingModule({
        imports: [RouterTestingModule, SharedModule],
        declarations: [ClientsComponent],
        providers: [{ provide: CephfsService, useValue: fakeFilesystemService }]
      }).compileComponents();
    })
  );

  beforeEach(() => {
    fixture = TestBed.createComponent(ClientsComponent);
    component = fixture.componentInstance;
    // Triggers ngOnInit against the stubbed service.
    fixture.detectChanges();
  });

  it('should create', () => {
    expect(component).toBeTruthy();
  });
});
--- /dev/null
+import { Component, OnDestroy, OnInit } from '@angular/core';
+import { ActivatedRoute } from '@angular/router';
+
+import { ViewCacheStatus } from '../../../shared/enum/view-cache-status.enum';
+import { CephfsService } from '../cephfs.service';
+
+@Component({
+ selector: 'cd-clients',
+ templateUrl: './clients.component.html',
+ styleUrls: ['./clients.component.scss']
+})
+export class ClientsComponent implements OnInit, OnDestroy {
+ routeParamsSubscribe: any;
+
+ id: number;
+ name: string;
+ clients: any;
+ viewCacheStatus: ViewCacheStatus;
+
+ interval: any;
+
+ constructor(private route: ActivatedRoute, private cephfsService: CephfsService) {}
+
+ ngOnInit() {
+ this.clients = {
+ columns: [
+ { prop: 'id' },
+ { prop: 'type' },
+ { prop: 'state' },
+ { prop: 'version' },
+ { prop: 'hostname', name: 'Host' },
+ { prop: 'root' }
+ ],
+ data: []
+ };
+
+ this.routeParamsSubscribe = this.route.params.subscribe((params: { id: number }) => {
+ this.id = params.id;
+ this.clients.data = [];
+ this.viewCacheStatus = ViewCacheStatus.ValueNone;
+
+ this.cephfsService.getCephfs(this.id).subscribe((data: any) => {
+ this.name = data.cephfs.name;
+ });
+
+ this.refresh();
+ });
+
+ this.interval = setInterval(() => {
+ this.refresh();
+ }, 5000);
+ }
+
+ ngOnDestroy() {
+ clearInterval(this.interval);
+ this.routeParamsSubscribe.unsubscribe();
+ }
+
+ refresh() {
+ this.cephfsService.getClients(this.id).subscribe((data: any) => {
+ this.viewCacheStatus = data.status;
+ this.clients.data = data.data;
+ });
+ }
+}
<span>Dashboard</span>
</a>
</li>
+
<li dropdown
    routerLinkActive="active"
    class="dropdown tc_menuitem tc_menuitem_cephs">
  <a dropdownToggle
     class="dropdown-toggle"
     data-toggle="dropdown">
    <ng-container i18n>Filesystems</ng-container>
    <span class="caret"></span>
  </a>
  <ul *dropdownMenu
      class="dropdown-menu">
    <li routerLinkActive="active"
        class="tc_submenuitem tc_submenuitem_cephfs_fs"
        *ngFor="let fs of topLevelData?.filesystems">
      <a i18n
         class="dropdown-item"
         routerLink="/cephfs/{{fs.id}}">{{ fs.name }}
      </a>
    </li>
    <!-- BUGFIX: use safe navigation like the *ngFor above; without it this
         expression throws while topLevelData is still undefined. -->
    <li class="tc_submenuitem tc_submenuitem_cephfs_nofs"
        *ngIf="topLevelData?.filesystems?.length === 0">
      <span i18n>There are no filesystems</span>
    </li>
  </ul>
</li>
<!--
<li routerLinkActive="active"
class="tc_menuitem tc_menuitem_ceph_osds">
import 'core-js/es6/string';
import 'core-js/es6/symbol';
import 'core-js/es6/weak-map';
+import 'core-js/es7/object';
/** IE10 and IE11 requires the following for NgClass support on SVG elements */
// import 'classlist.js'; // Run `npm install --save classlist.js`.
--- /dev/null
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from .helper import ControllerTestCase, authenticate
+
+
class CephfsTest(ControllerTestCase):
    """Smoke tests for the /api/cephfs REST endpoints."""

    @authenticate
    def test_cephfs_clients(self):
        """The clients endpoint wraps its payload in status/data."""
        body = self._get("/api/cephfs/clients/1")
        self.assertStatus(200)
        self.assertIn('status', body)
        self.assertIn('data', body)

    @authenticate
    def test_cephfs_data(self):
        """The data endpoint returns the three top-level sections."""
        body = self._get("/api/cephfs/data/1/")
        self.assertStatus(200)
        for section in ('cephfs', 'standbys', 'versions'):
            self.assertIn(section, body)
            self.assertIsNotNone(body[section])

    @authenticate
    def test_cephfs_mds_counters(self):
        """The counters endpoint returns a per-daemon mapping."""
        body = self._get("/api/cephfs/mds_counters/1")
        self.assertStatus(200)
        self.assertIsInstance(body, dict)
        self.assertIsNotNone(body)