From: Nizamudeen A
Date: Thu, 20 Mar 2025 08:51:50 +0000 (+0530)
Subject: mgr/dashboard: use existing pools for cephfs vol creation
X-Git-Tag: v20.3.0~280^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=8628b46c987296d269aabca2bdb9acc1e10a050f;p=ceph.git

mgr/dashboard: use existing pools for cephfs vol creation

We can use the newly introduced data and metadata params to create a
volume backed by existing pools. The UI narrows the pool list down to
pools that carry the 'cephfs' application label and are not already in
use; to decide whether a pool is in use, it fetches the pool stats and
checks the used bytes. (Rough, illustrative sketches of the filtering
rule and of the used-pool lookup follow the diff below.)

Note: using EC pools for the data pool layout is discouraged by the
official docs:
https://docs.ceph.com/en/latest/cephfs/createfs/#creating-a-file-system
It can be forced, but for now I have disabled it entirely in the
dashboard unless people say it's okay to allow it.

One more thing this change does is add a note to the filesystem delete
confirmation saying that the underlying pools and MDS daemons will be
removed as well.

Fixes: https://tracker.ceph.com/issues/70600
Signed-off-by: Nizamudeen A
---
diff --git a/src/pybind/mgr/dashboard/controllers/cephfs.py b/src/pybind/mgr/dashboard/controllers/cephfs.py
index d05b7551365..90e2f448b43 100644
--- a/src/pybind/mgr/dashboard/controllers/cephfs.py
+++ b/src/pybind/mgr/dashboard/controllers/cephfs.py
@@ -4,7 +4,7 @@ import errno
 import json
 import os
 from collections import defaultdict
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
 
 import cephfs
 import cherrypy
@@ -17,7 +17,8 @@ from ..services.cephfs import CephFS as CephFS_
 from ..services.exception import handle_cephfs_error
 from ..tools import ViewCache, str_to_bool
 from . import APIDoc, APIRouter, DeletePermission, Endpoint, EndpointDoc, \
-    RESTController, UIRouter, UpdatePermission, allow_empty_body
+    ReadPermission, RESTController, UIRouter, UpdatePermission, \
+    allow_empty_body
 
 GET_QUOTAS_SCHEMA = {
     'max_bytes': (int, ''),
@@ -42,10 +43,15 @@ class CephFS(RESTController):
         self.cephfs_clients = {}
 
     def list(self):
-        fsmap = mgr.get("fs_map")
-        return fsmap['filesystems']
-
-    def create(self, name: str, service_spec: Dict[str, Any]):
+        return CephFS_.list_filesystems(all_info=True)
+
+    def create(
+        self,
+        name: str,
+        service_spec: Dict[str, Any],
+        data_pool: Optional[str] = None,
+        metadata_pool: Optional[str] = None
+    ):
         service_spec_str = '1 '
         if 'labels' in service_spec['placement']:
             for label in service_spec['placement']['labels']:
@@ -56,8 +62,17 @@ class CephFS(RESTController):
                 service_spec_str += f'{host} '
             service_spec_str = service_spec_str[:-1]
 
-        error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_create', None,
-                                        {'name': name, 'placement': service_spec_str})
+        error_code, _, err = mgr.remote(
+            'volumes',
+            '_cmd_fs_volume_create',
+            None,
+            {
+                'name': name,
+                'placement': service_spec_str,
+                'data_pool': data_pool,
+                'meta_pool': metadata_pool
+            }
+        )
         if error_code != 0:
             raise RuntimeError(
                 f'Error creating volume {name} with placement {str(service_spec)}: {err}')
@@ -720,6 +735,19 @@ class CephFsUi(CephFS):
             paths = []
         return paths
 
+    @Endpoint('GET', path='/used-pools')
+    @ReadPermission
+    def ls_used_pools(self):
+        """
+        This API is created just to list all the used pools to the UI
+        so that it can be used for different validation purposes within
+        the UI
+        """
+        pools = []
+        for fs in CephFS_.list_filesystems(all_info=True):
+            pools.extend(fs['mdsmap']['data_pools'] + [fs['mdsmap']['metadata_pool']])
+        return pools
+
 
 @APIRouter('/cephfs/subvolume', Scope.CEPHFS)
 @APIDoc('CephFS Subvolume Management API', 'CephFSSubvolume')
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.html b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.html
index a7708aa496f..cb8f101a847 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.html
@@ -53,6 +53,107 @@
+
+        Use existing pools
+        Allows you to use already created replicated pools that have the 'cephfs' application tag.
+
+        You need to have at least 2 pools that are empty, tagged with the cephfs application and not erasure-coded.
+
+        Data pool
+        This field is required!
+
+        Metadata pool
+        This field is required!
+
{
   let component: CephfsVolumeFormComponent;
@@ -29,7 +35,8 @@ describe('CephfsVolumeFormComponent', () => {
         GridModule,
         InputModule,
         SelectModule,
-        ComboBoxModule
+        ComboBoxModule,
+        CheckboxModule
       ],
       declarations: [CephfsVolumeFormComponent]
     });
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.ts
index c0373a9fb77..3681bfeff7e 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.ts
@@ -19,6 +19,8 @@ import { CdValidators } from '~/app/shared/forms/cd-validators';
 import { FinishedTask } from '~/app/shared/models/finished-task';
 import { Permission } from '~/app/shared/models/permissions';
 import { TaskWrapperService } from '~/app/shared/services/task-wrapper.service';
+import { PoolService } from '~/app/shared/api/pool.service';
+import { Pool } from '../../pool/pool';
 
 @Component({
   selector: 'cd-cephfs-form',
@@ -51,6 +53,9 @@ export class CephfsVolumeFormComponent extends CdForm implements OnInit {
   fsId: number;
   disableRename: boolean = true;
   hostsAndLabels$: Observable<{ hosts: any[]; labels: any[] }>;
+  pools: Pool[] = [];
+  dataPools: Pool[] = [];
+  metadatPools: Pool[] = [];
   fsFailCmd: string;
   fsSetCmd: string;
 
@@ -66,7 +71,8 @@ export class CephfsVolumeFormComponent extends CdForm implements OnInit {
     public actionLabels: ActionLabelsI18n,
     private hostService: HostService,
     private cephfsService: CephfsService,
-    private route: ActivatedRoute
+    private route: ActivatedRoute,
+    private poolService: PoolService
   ) {
     super();
     this.editing = this.router.url.startsWith(`/cephfs/fs/${URLVerbs.EDIT}`);
@@ -94,7 +100,20 @@ export class CephfsVolumeFormComponent extends CdForm implements OnInit {
           })
         ]
       ],
-      unmanaged: [false]
+      unmanaged: [false],
+      customPools: [false],
+      dataPool: [
+        null,
+        CdValidators.requiredIf({
+          customPools: true
+        })
+      ],
+      metadataPool: [
+        null,
+        CdValidators.requiredIf({
+          customPools: true
+        })
+      ]
     });
     this.orchService.status().subscribe((status) => {
       this.hasOrchestrator = status.available;
@@ -111,6 +130,15 @@ export class CephfsVolumeFormComponent extends CdForm implements OnInit {
       this.cephfsService.getCephfs(this.fsId).subscribe((resp: object) => {
         this.currentVolumeName = resp['cephfs']['name'];
         this.form.get('name').setValue(this.currentVolumeName);
+        const dataPool =
+          resp['cephfs'].pools.find((pool: Pool) => pool.type === 'data')?.pool || '';
+        const metaPool =
+          resp['cephfs'].pools.find((pool: Pool) => pool.type === 'metadata')?.pool || '';
+        this.form.get('dataPool').setValue(dataPool);
+        this.form.get('metadataPool').setValue(metaPool);
+
+        this.form.get('dataPool').disable();
+        this.form.get('metadataPool').disable();
         this.disableRename = !(
           !resp['cephfs']['flags']['joinable'] &&
           resp['cephfs']['flags']['refuse_client_session']
@@ -122,6 +150,27 @@ export class CephfsVolumeFormComponent extends CdForm implements OnInit {
         }
       });
     } else {
+      forkJoin({
+        usedPools: this.cephfsService.getUsedPools(),
+        pools: this.poolService.getList()
+      }).subscribe(({ usedPools, pools }) => {
+        // filtering pools if
+        // * pool is labelled with cephfs
+        // * it's not already used by cephfs
+        // * it's not erasure coded
+        // * and only if it's empty
+        const filteredPools = Object.values(pools).filter(
+          (pool: Pool) =>
+            this.cephfsService.isCephFsPool(pool) &&
+            !usedPools.includes(pool.pool) &&
+            pool.type !== 'erasure' &&
+            pool.stats.bytes_used.latest === 0
+        );
+        if (filteredPools.length < 2) this.form.get('customPools').disable();
+        this.pools = filteredPools;
+        this.metadatPools = this.dataPools = this.pools;
+      });
+
       this.hostsAndLabels$ = forkJoin({
         hosts: this.hostService.getAllHosts(),
         labels: this.hostService.getLabels()
@@ -136,6 +185,12 @@ export class CephfsVolumeFormComponent extends CdForm implements OnInit {
     this.loadingReady();
   }
 
+  onPoolChange(poolName: string, metadataChange = false) {
+    if (!metadataChange) {
+      this.metadatPools = this.pools.filter((pool: Pool) => pool.pool_name !== poolName);
+    } else this.dataPools = this.pools.filter((pool: Pool) => pool.pool_name !== poolName);
+  }
+
   multiSelector(event: any, field: 'label' | 'hosts') {
     if (field === 'label') this.selectedLabels = event.map((label: any) => label.content);
     else this.selectedHosts = event.map((host: any) => host.content);
@@ -178,6 +233,9 @@ export class CephfsVolumeFormComponent extends CdForm implements OnInit {
         break;
     }
 
+    const dataPool = values['dataPool'];
+    const metadataPool = values['metadataPool'];
+
     const self = this;
     let taskUrl = `${BASE_URL}/${URLVerbs.CREATE}`;
     this.taskWrapperService
@@ -185,7 +243,12 @@ export class CephfsVolumeFormComponent extends CdForm implements OnInit {
         task: new FinishedTask(taskUrl, {
           volumeName: volumeName
         }),
-        call: this.cephfsService.create(this.form.get('name').value, serviceSpec)
+        call: this.cephfsService.create(
+          this.form.get('name').value,
+          serviceSpec,
+          dataPool,
+          metadataPool
+        )
       })
       .subscribe({
         error() {
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.html b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.html
index 89a825bdd98..7b9934a6020 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.html
@@ -20,3 +20,10 @@
+
+    This will remove its data and metadata pools. It'll also remove the MDS
+    daemon associated with the volume.
+
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.ts
index 383cd0ece4d..005071b1f90 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-list/cephfs-list.component.ts
@@ -1,4 +1,4 @@
-import { Component, OnInit } from '@angular/core';
+import { Component, OnInit, TemplateRef, ViewChild } from '@angular/core';
 
 import { Permissions } from '~/app/shared/models/permissions';
 import { Router } from '@angular/router';
@@ -37,6 +37,9 @@ const BASE_URL = 'cephfs/fs';
   providers: [{ provide: URLBuilderService, useValue: new URLBuilderService(BASE_URL) }]
 })
 export class CephfsListComponent extends ListWithDetails implements OnInit {
+  @ViewChild('deleteTpl', { static: true })
+  deleteTpl: TemplateRef;
+
   columns: CdTableColumn[];
   filesystems: any = [];
   selection = new CdTableSelection();
@@ -178,6 +181,7 @@ export class CephfsListComponent extends ListWithDetails implements OnInit {
       itemDescription: 'File System',
       itemNames: [volName],
       actionDescription: 'remove',
+      bodyTemplate: this.deleteTpl,
       submitActionObservable: () =>
         this.taskWrapper.wrapTaskAroundCall({
           task: new FinishedTask('cephfs/remove', { volumeName: volName }),
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs.service.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs.service.ts
index 2d49de37c08..07235390b8e 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs.service.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs.service.ts
@@ -79,10 +79,15 @@ export class CephfsService {
     });
   }
 
-  create(name: string, serviceSpec: object) {
+  create(name: string, serviceSpec: object, dataPool = '', metadataPool = '') {
     return this.http.post(
       this.baseURL,
-      { name: name, service_spec: serviceSpec },
+      {
+        name: name,
+        service_spec: serviceSpec,
+        data_pool: dataPool,
+        metadata_pool: metadataPool
+      },
       {
         observe: 'response'
       }
@@ -117,4 +122,8 @@ export class CephfsService {
       root_squash: rootSquash
     });
   }
+
+  getUsedPools(): Observable {
+    return this.http.get(`${this.baseUiURL}/used-pools`);
+  }
 }
diff --git a/src/pybind/mgr/dashboard/openapi.yaml b/src/pybind/mgr/dashboard/openapi.yaml
index e1e5fab12df..39d262c25ae 100755
--- a/src/pybind/mgr/dashboard/openapi.yaml
+++ b/src/pybind/mgr/dashboard/openapi.yaml
@@ -1692,6 +1692,10 @@ paths:
         application/json:
           schema:
             properties:
+              data_pool:
+                type: string
+              metadata_pool:
+                type: string
               name:
                 type: string
               service_spec:
diff --git a/src/pybind/mgr/dashboard/services/cephfs.py b/src/pybind/mgr/dashboard/services/cephfs.py
index 6a3cd6b72ba..3e9b9e5bae2 100644
--- a/src/pybind/mgr/dashboard/services/cephfs.py
+++ b/src/pybind/mgr/dashboard/services/cephfs.py
@@ -14,8 +14,11 @@ logger = logging.getLogger('cephfs')
 
 
 class CephFS(object):
     @classmethod
-    def list_filesystems(cls):
+    def list_filesystems(cls, all_info=False):
        fsmap = mgr.get("fs_map")
+
+        if all_info:
+            return fsmap['filesystems']
         return [{'id': fs['id'], 'name': fs['mdsmap']['fs_name']}
                 for fs in fsmap['filesystems']]
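
For reviewers who want to reason about the UI behaviour without running the dashboard, here is a minimal, self-contained Python sketch of the pool-filtering rule the form applies. This is an illustration only: the dict keys ('pool', 'pool_name', 'type', 'application_metadata', 'stats') are assumptions modelled on what the dashboard's pool listing exposes, not an exact API.

from typing import Any, Dict, List


def candidate_pools(pools: List[Dict[str, Any]],
                    used_pool_ids: List[int]) -> List[Dict[str, Any]]:
    """Mirror the form's rule: keep pools that carry the 'cephfs' application,
    are replicated (not erasure-coded), are empty and are not already part of
    an existing file system."""
    return [
        pool for pool in pools
        if 'cephfs' in pool.get('application_metadata', {})
        and pool.get('type') != 'erasure'
        and pool.get('stats', {}).get('bytes_used', 0) == 0
        and pool.get('pool') not in used_pool_ids
    ]


# Example: only pool id 1 qualifies; the EC pool and the non-empty rbd pool are dropped.
pools = [
    {'pool': 1, 'pool_name': 'cephfs.a.meta', 'type': 'replicated',
     'application_metadata': {'cephfs': {}}, 'stats': {'bytes_used': 0}},
    {'pool': 2, 'pool_name': 'ec-data', 'type': 'erasure',
     'application_metadata': {'cephfs': {}}, 'stats': {'bytes_used': 0}},
    {'pool': 3, 'pool_name': 'rbd', 'type': 'replicated',
     'application_metadata': {'rbd': {}}, 'stats': {'bytes_used': 4096}},
]
print([p['pool_name'] for p in candidate_pools(pools, used_pool_ids=[])])

The form additionally requires at least two such pools (one for data, one for metadata) before the "Use existing pools" checkbox is enabled.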
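
Similarly, a sketch of how the used-pool lookup works: this mirrors ls_used_pools() in the controller above, with a made-up fs_map literal standing in for mgr.get('fs_map').

from typing import Any, Dict, List


def used_pool_ids(fs_map: Dict[str, Any]) -> List[int]:
    """Collect every data and metadata pool id referenced by an existing
    file system, so the form can exclude them from its pool dropdowns."""
    pools: List[int] = []
    for fs in fs_map['filesystems']:
        pools.extend(fs['mdsmap']['data_pools'] + [fs['mdsmap']['metadata_pool']])
    return pools


# Hypothetical FSMap with one file system using pool 2 (metadata) and pool 3 (data).
fs_map = {
    'filesystems': [
        {'mdsmap': {'fs_name': 'a', 'metadata_pool': 2, 'data_pools': [3]}}
    ]
}
print(used_pool_ids(fs_map))  # -> [3, 2]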
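
Finally, the shape of the request body the frontend now sends when creating a volume from existing pools, written as a Python literal. Only the four top-level keys come from the OpenAPI change above; the concrete values and the placement spec are made up for illustration, and data_pool/metadata_pool can be left out to keep the old behaviour of creating fresh pools.

# Illustrative payload for the dashboard's CephFS volume-create REST call.
payload = {
    'name': 'newfs',
    'service_spec': {'placement': {'labels': ['mds']}},  # or {'hosts': [...]}
    'data_pool': 'existing-data-pool',          # added by this change
    'metadata_pool': 'existing-metadata-pool'   # added by this change
}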