--- /dev/null
- "version_date": "2014-02-10 10:17:56.283499",
- "version_id": 10
+# Ceph-brag
+
+`ceph-brag` is going to be an anonymized cluster reporting tool designed to collect a "registry" of Ceph clusters for community knowledge.
+This data will be displayed on a public web page using UUID by default, but users can claim their cluster and publish information about ownership if they so desire.
+
+For more information please visit:
+
+* [Blueprint](http://wiki.ceph.com/Planning/Blueprints/Firefly/Ceph-Brag)
+* [CDS Etherpad](http://pad.ceph.com/p/cdsfirefly-ceph-brag)
+
+# Client
+
+## How to use:
+
+### Pre-requisites:
+ceph-brag uses the 'ceph' python script. Hence, before executing the ceph-brag script, ensure that the ceph services are all running and that the 'ceph' script is in the 'PATH' environment variable.
+
+### Runtime instructions:
+Run 'ceph-brag -h' to get the usage information of this tool.
+
+### Sample output:
+
+ {
+ "cluster_creation_date": "2014-01-16 13:38:41.928551",
+ "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
+ "components_count": {
+ "bytes": {
+ "count": 0,
+ "scale": "bytes"
+ },
+ "osds": 1,
+ "objects": 0,
+ "pgs": 192,
+ "pools": 3,
+ "mdss": 1,
+ "mons": 1
+ },
+ "crush_types": [
+ "osd",
+ "host",
+ "chassis",
+ "rack",
+ "row",
+ "pdu",
+ "pod",
+ "room",
+ "datacenter",
+ "region",
+ "root"
+ ],
+ "ownership": {
+ "organization": "eNovance",
+ "description": "Use case1",
+ "email": "mail@enovance.com",
+ "name": "Cluster1"
+ },
+ "pool_metadata": [
+ {
+ "rep_size": 3,
+ "id": "0",
+ "name": "data"
+ },
+ {
+ "rep_size": 3,
+ "id": "1",
+ "name": "metadata"
+ },
+ {
+ "rep_size": 3,
+ "id": "2",
+ "name": "rbd"
+ }
+ ],
+ "sysinfo": [
+ {
+ "nw_info": {
+ "hostname": "ceph-brag",
+ "address": "127.0.0.1"
+ },
+ "hw_info": {
+ "swap_kb": 0,
+ "arch": "x86_64",
+ "cpu": "Intel Xeon E312xx (Sandy Bridge)",
+ "mem_kb": 2051648
+ },
+ "id": 0,
+ "os_info": {
+ "version": "3.2.0-23-virtual",
+ "os": "Linux",
+ "description": "#36-Ubuntu SMP Tue Apr 10 22:29:03 UTC 2012",
+ "distro": "Ubuntu 12.04 precise (Ubuntu 12.04 LTS)"
+ },
+ "ceph_version": "ceph version 0.75-229-g4050eae (4050eae32cd77a1c210ca11d0f12c74daecb1bd3)"
+ }
+ ]
+ }
+
+
+# Server
+
+## Info
+The ceph-brag server code is a python based web application.
+
+## How to use
+
+### Prerequisites
+* [pecan](http://pecanpy.org) is the web framework that is used by this application.
+* [sqlalchemy](http://www.sqlalchemy.org) is the ORM that is used by this application
+
+### How to deploy
+* [Common recipes to deploy](http://pecan.readthedocs.org/en/latest/deployment.html#common-recipes)
+* Modify server/config.py:sqlalchemy['url'] to point to the correct database connection
+
+## URLs
+Following are the REST urls that are implemented with 'url-prefix' being the mount point for the WSGI script
+
+### GET
+
+##### * GET /url-prefix/
+Returns the list of clusters that are registered so far.
+Outputs - On success application/json of the following format is returned
+
+ [
+ {
+ "num_versions": 3,
+ "cluster_creation_date": "2014-01-16 13:38:41.928551",
+ "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
+ "cluster_name": "Cluster1",
+ "organization": "eNovance",
+ "email": "mail@enovance.com"
+ },
+ ...
+ ]
+
+##### * GET /url-prefix/UUID
+Returns the list of version information for a particular UUID.
+Outputs - On success application/json of the following format is returned
+
+ [
+ {
+ "version_number": 1,
++ "version_date": "2014-02-10 10:17:56.283499"
+ },
+ ...
+ ]
+
+##### * GET /url-prefix/UUID/version\_number
+Returns the entire brag report as mentioned in client's sample output for a particular version of a UUID
+
+### PUT
+
+##### * PUT /url-prefix
+Uploads the brag report and creates a new version for the UUID mentioned in the payload
+
+### DELETE
+
+##### * DELETE /url-prefix?uuid=xxxx
+Deletes all the versions of a cluster whose UUID is sent as a parameter
+
+
--- /dev/null
- meta['ceph_version'] = jmeta['ceph_version']
+#!/usr/bin/env python
+
+import subprocess
+import uuid
+import re
+import json
+import sys
+import ast
+import requests
+
+CLUSTER_UUID_NAME='cluster-uuid'
+CLUSTER_OWNERSHIP_NAME='cluster-ownership'
+
def run_command(cmd):
    """Run *cmd* (an argv list) and return (returncode, stdout, stderr).

    universal_newlines=True makes communicate() return text rather than
    bytes, so callers can run regexes over the output on Python 3 as
    well as Python 2.
    """
    child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True)
    (o, e) = child.communicate()
    return (child.returncode, o, e)
+
def get_uuid():
    """Return the persistent cluster UUID, creating and storing it if unset.

    The UUID is kept in the monitors' config-key store under
    CLUSTER_UUID_NAME.  Raises RuntimeError if a newly generated UUID
    cannot be persisted.
    """
    (rc, uid, e) = run_command(['ceph', 'config-key', 'get', CLUSTER_UUID_NAME])
    if rc != 0:          # 'is not 0' compared identity, not value
        # uuid is not yet set: generate one and store it for next time
        uid = str(uuid.uuid4())
        (rc, o, e) = run_command(['ceph', 'config-key', 'put',
                                  CLUSTER_UUID_NAME, uid])
        if rc != 0:
            raise RuntimeError("\'ceph config-key put\' failed -" + e)

    return uid
+
def get_cluster_creation_date():
    """Return the cluster creation timestamp string, or None if unknown.

    Tries 'ceph mon dump' first; falls back to 'ceph osd dump' when the
    mon map reports the epoch placeholder '0.000000'.
    """
    (rc, o, e) = run_command(['ceph', 'mon', 'dump'])
    if rc != 0:
        raise RuntimeError("\'ceph mon dump\' failed - " + e)

    rec = re.compile('(.*created\ )(.*)(\n.*)')

    mo = rec.search(o)
    if mo and mo.group(2) != '0.000000':
        return mo.group(2)

    # Try and get the date from osd dump
    (rc, o, e) = run_command(['ceph', 'osd', 'dump'])
    if rc != 0:
        raise RuntimeError("\'ceph osd dump\' failed - " + e)

    mo = rec.search(o)
    if not mo or mo.group(2) == '0.000000':
        sys.stderr.write("Unable to get cluster creation date\n")
        # the original fell through to mo.group(2) here, which raised
        # AttributeError whenever mo was None
        return None

    return mo.group(2)
+
def get_nums():
    """Parse 'ceph -s' output into a dict of component counts.

    Returns keys: mons, osds, mdss, pgs, pools, bytes ({count, scale}),
    objects.  Raises RuntimeError when a mandatory map line is missing.
    """
    (rc, o, e) = run_command(['ceph', '-s'])
    if rc != 0:          # '!=' rather than 'is not': value, not identity
        raise RuntimeError("\'ceph -s\' failed - " + e)

    mo = re.search('(.*monmap\ .*:\ )(\d+)(.*)', o)
    if not mo:
        raise RuntimeError("Unmatched pattern for monmap in \'ceph status\'")
    num_mons = int(mo.group(2))

    # NOTE(review): the greedy '.*' before each (\d+) can leave only the
    # trailing digits of a number in the group -- confirm against real
    # 'ceph -s' output before trusting multi-digit counts here.
    mo = re.search('.*osdmap.*(\d+).*(\d+).*(\d+).*', o)
    if not mo:
        raise RuntimeError("Unmatched pattern for osdmap in \'ceph status\'")
    num_osds = int(mo.group(1))

    num_mdss = 0
    mo = re.search('.*mdsmap\ e\d+.*(\d+)/(\d+)/(\d+).*', o)
    if mo:
        num_mdss = int(mo.group(2))

    mo = re.search('.*pgmap\ v\d+:\ (\d+).*,\ (\d+).*,\ (\d+)\ (\S+)\ data,\ (\d+).*', o)
    if not mo:
        raise RuntimeError("Unmatched pattern for pgmap in \'ceph status\'")
    num_pgs = int(mo.group(1))
    num_pools = int(mo.group(2))
    byte_count = int(mo.group(3))
    byte_scale = mo.group(4)
    num_objs = int(mo.group(5))

    return {'mons': num_mons,
            'osds': num_osds,
            'mdss': num_mdss,
            'pgs': num_pgs,
            'pools': num_pools,
            'bytes': {'count': byte_count, 'scale': byte_scale},
            'objects': num_objs}
+
def get_crush_types():
    """Return the list of CRUSH bucket type names from 'ceph osd crush dump'."""
    (rc, o, e) = run_command(['ceph', 'osd', 'crush', 'dump'])
    if rc != 0:
        raise RuntimeError("\'ceph osd crush dump\' failed - " + e)

    crush_dump = json.loads(o)
    # .get(): a missing key must raise our RuntimeError, not a bare
    # KeyError as the original crush_dump['types'] lookup did
    if crush_dump.get('types') is None:
        raise RuntimeError("\'types\' item missing in \'ceph osd crush dump\'")

    return [t['name'] for t in crush_dump['types']]
+
def get_pool_metadata():
    """Return [{'id', 'name', 'rep_size'}, ...] for every pool in 'ceph osd dump'."""
    (rc, o, e) = run_command(['ceph', 'osd', 'dump'])
    if rc != 0:
        raise RuntimeError("\'ceph osd dump\' failed - " + e)

    # Older releases print "rep size N", newer ones "replicated size N".
    result = re.findall("pool\ (\d+)\ '(\S+)'\ rep\ size\ (\d+)", o)
    if not result:       # 'len(result) is 0' compared identity, not value
        result = re.findall("pool\ (\d+)\ '(\S+)'\ replicated\ size\ (\d+)", o)
        if not result:
            raise RuntimeError("Unmatched pattern for \'pool\' in \'ceph osd dump\'")

    return [{'id': r[0], 'name': r[1], 'rep_size': int(r[2])} for r in result]
+
def get_sysinfo(max_osds):
    """Collect per-OSD hardware/OS/network info for OSD ids 0..max_osds-1.

    OSDs whose 'ceph osd metadata' call fails are skipped entirely; a
    warning is printed when no OSD supplied metadata at all.
    """
    sysinfo = []
    count = 0
    osd_metadata_available = False

    while count < max_osds:
        meta = {'id': count}
        (rc, o, e) = run_command(['ceph', 'osd', 'metadata', str(count)])
        if rc == 0:
            osd_metadata_available = True
            os_info = {}
            hw_info = {}
            nw_info = {}

            jmeta = json.loads(o)

            # "ceph version X (sha)": keep the version string, plus the
            # sha token when present
            version = jmeta['ceph_version'].split()
            meta['ceph_version'] = version[2]
            if len(version) > 3:
                meta['ceph_version'] += version[3]

            os_info['os'] = jmeta['os']
            os_info['version'] = jmeta['kernel_version']
            os_info['description'] = jmeta['kernel_description']

            try:
                distro = jmeta['distro'] + ' '
                distro += jmeta['distro_version'] + ' '
                distro += jmeta['distro_codename'] + ' ('
                distro += jmeta['distro_description'] + ')'
                os_info['distro'] = distro
            except KeyError:
                # distro fields are optional in the metadata output
                pass
            meta['os_info'] = os_info

            hw_info['cpu'] = jmeta['cpu']
            hw_info['arch'] = jmeta['arch']
            hw_info['mem_kb'] = int(jmeta['mem_total_kb'])
            hw_info['swap_kb'] = int(jmeta['mem_swap_kb'])
            meta['hw_info'] = hw_info

            # get_osd_host() returns None for unparseable (e.g. IPv6)
            # addresses; the original unpacked it blindly and crashed
            host = get_osd_host(count)
            if host is not None:
                (nw_info['address'], nw_info['hostname']) = host
            meta['nw_info'] = nw_info

            sysinfo.append(meta)
        count = count + 1

    if not osd_metadata_available:
        sys.stderr.write("'ceph osd metadata' is not available at all\n")

    return sysinfo
+
def get_osd_host(osd_id):
    """Return (ip, hostname) for *osd_id*, or None if the address is unparseable.

    Raises RuntimeError when 'ceph osd find' fails.
    """
    (rc, o, e) = run_command(['ceph', 'osd', 'find', str(osd_id)])
    if rc != 0:
        raise RuntimeError("\'ceph osd find\' failed - " + e)

    jloc = json.loads(o)

    # Dots escaped: the original '\d+.\d+.\d+.\d+' let '.' match any
    # character, accepting non-dotted-quad strings.
    mo = re.search("(\d+\.\d+\.\d+\.\d+).*", jloc['ip'])
    if mo is None:
        # Might be in ipv6 format, TODO: Verify
        return None

    ip = mo.group(1)
    host = jloc['crush_location']['host']

    return (ip, host)
+
def get_ownership_info():
    """Return the stored ownership dict, or {} when none has been set.

    The value was stored with str(dict) by update_metadata();
    ast.literal_eval parses it back safely (no code execution).
    """
    (rc, o, e) = run_command(['ceph', 'config-key', 'get',
                              CLUSTER_OWNERSHIP_NAME])
    if rc != 0:
        return {}

    return ast.literal_eval(o)
+
def output_json():
    """Assemble the full brag report.

    Returns (json_string, url) where url is the publish endpoint from
    the ownership info, or None when unset.
    """
    out = {}
    url = None

    out['uuid'] = get_uuid()
    out['cluster_creation_date'] = get_cluster_creation_date()
    nums = get_nums()
    num_osds = int(nums['osds'])
    out['components_count'] = nums
    out['crush_types'] = get_crush_types()
    out['pool_metadata'] = get_pool_metadata()
    out['sysinfo'] = get_sysinfo(num_osds)

    owner = get_ownership_info()
    # get_ownership_info() returns {} (never None) when unset, so the
    # original 'is not None' check was dead and let an empty ownership
    # dict into the report.  Only include ownership when populated.
    if owner:
        out['ownership'] = owner
        if 'url' in owner:
            # pop() also removes 'url' from out['ownership'] (same dict):
            # the publish URL is deliberately kept out of the report body
            url = owner.pop('url')

    return json.dumps(out, indent=2, separators=(',', ': ')), url
+
def describe_usage():
    """Print the command-line usage summary to stderr.

    Uses sys.stderr.write (valid on both Python 2 and 3) instead of the
    Python-2-only 'print >>' statement.
    """
    err = sys.stderr.write
    err("Usage:\n")
    err("======\n\n")

    err(sys.argv[0] + " <commands> [command-options]\n\n")
    err("commands:\n")
    err("publish - publish the brag report to the server\n")
    err("update-metadata <update-metadata-options> - Update\n")
    err("             ownership information for bragging\n")
    err("clear-metadata - Clear information set by update-metadata\n")
    err("unpublish --yes-i-am-shy - delete the brag report from the server\n")
    err("\n")

    err("update-metadata options:\n")
    err("--name= - Name of the cluster\n")
    err("--organization= - Name of the organization\n")
    err("--email= - Email contact address\n")
    err("--description= - Reporting use-case\n")
    err("--url= - The URL that is used to publish and unpublish\n")
    err("\n")
+
def update_metadata():
    """Merge --key=value arguments from argv into the stored ownership info.

    Accepted keys: name, organization, email, description, url.
    Returns 0 on success, 22 (EINVAL) on a malformed or unknown option.
    """
    possibles = ['name', 'organization', 'email', 'description', 'url']

    # start from the existing values so keys not mentioned on the
    # command line are preserved
    info = get_ownership_info()

    for index in range(2, len(sys.argv)):
        mo = re.search("--(\S+)=(.*)", sys.argv[index])
        if not mo:
            describe_usage()
            return 22

        k = mo.group(1)
        v = mo.group(2)

        if k in possibles:
            info[k] = v
        else:
            # typo fix: was "Unexpect option"
            sys.stderr.write("Unexpected option --" + k + "\n")
            describe_usage()
            return 22

    (rc, o, e) = run_command(['ceph', 'config-key', 'put',
                              CLUSTER_OWNERSHIP_NAME, str(info)])
    return rc
+
def clear_metadata():
    """Delete the stored ownership info and return the command's exit code."""
    rc, _, _ = run_command(['ceph', 'config-key', 'del',
                            CLUSTER_OWNERSHIP_NAME])
    return rc
+
def publish():
    """Upload the brag report to the configured URL.

    Returns 0 on success, 1 when no URL is set or the server rejects
    the upload (anything other than HTTP 201).
    """
    data, url = output_json()
    if url is None:
        sys.stderr.write("Cannot publish until a URL is set using update-metadata\n")
        return 1

    req = requests.put(url, data=data)
    if req.status_code != 201:    # 'is not 201' compared identity, not value
        sys.stderr.write("Failed to publish, server responded with code " +
                         str(req.status_code) + "\n")
        sys.stderr.write(req.text + "\n")
        return 1

    return 0
+
def unpublish():
    """Delete this cluster's brag reports from the server.

    Requires the '--yes-i-am-shy' confirmation flag.  Returns 0 on
    success, 22 when unconfirmed, 1 on missing URL or server error.
    """
    if len(sys.argv) <= 2 or sys.argv[2] != '--yes-i-am-shy':
        sys.stderr.write("unpublish should be followed by --yes-i-am-shy\n")
        return 22

    owner = get_ownership_info()
    url = owner.get('url') if owner else None
    if url is None:
        sys.stderr.write("URL is not updated yet\n")
        return 1

    # renamed from 'uuid', which shadowed the uuid module
    cluster_uuid = get_uuid()

    params = {'uuid': cluster_uuid}
    req = requests.delete(url, params=params)
    if req.status_code != 200:    # 'is not 200' compared identity, not value
        # typo fix: was "responsed"
        sys.stderr.write("Failed to unpublish, server responded with code " +
                         str(req.status_code) + "\n")
        return 1

    return 0
+
def main():
    """Dispatch on the first CLI argument; return a process exit code.

    With no arguments, prints the brag report to stdout.
    """
    if len(sys.argv) == 1:        # 'is 1' compared identity, not value
        sys.stdout.write(output_json()[0] + "\n")
        return 0
    elif sys.argv[1] == 'update-metadata':
        return update_metadata()
    elif sys.argv[1] == 'clear-metadata':
        return clear_metadata()
    elif sys.argv[1] == 'publish':
        return publish()
    elif sys.argv[1] == 'unpublish':
        return unpublish()
    else:
        describe_usage()
        return 22
+
+if __name__ == '__main__':
+ sys.exit(main())
--- /dev/null
- version_id=vi.index,
+from pecan.jsonify import jsonify
+from ceph_brag.model import db
+
@jsonify.register(db.version_info)
def jsonify_version(vi):
    """Serialize a version_info row for JSON responses."""
    return {
        'version_number': vi.version_number,
        'version_date': str(vi.version_date),
    }
+
@jsonify.register(db.cluster_info)
def jsonify_cluster_info(ci):
    """Serialize a cluster_info row for the cluster-list endpoint."""
    return {
        'uuid': ci.uuid,
        'organization': ci.organization,
        'email': ci.contact_email,
        'cluster_name': ci.cluster_name,
        'cluster_creation_date': str(ci.cluster_creation_date),
        'num_versions': ci.num_versions,
    }
+
@jsonify.register(db.components_info)
def jsonify_components_info(comps):
    """Serialize component counts, mirroring the client report layout."""
    return {
        'bytes': {'count': comps.byte_count, 'scale': comps.byte_scale},
        'osds': comps.num_osds,
        'objects': comps.num_objects,
        'pgs': comps.num_pgs,
        'pools': comps.num_pools,
        'mdss': comps.num_mdss,
        'mons': comps.num_mons,
    }
+
@jsonify.register(db.pools_info)
def jsonify_pools_info(pool):
    """Serialize one pools_info row."""
    return {
        'rep_size': pool.pool_rep_size,
        'name': pool.pool_name,
        'id': pool.pool_id,
    }
+
@jsonify.register(db.osds_info)
def jsonify_osds_info(osd):
    """Serialize one osds_info row into the nested nw/hw/os layout."""
    network = {'address': osd.nw_address, 'hostname': osd.hostname}
    hardware = {'swap_kb': osd.swap_kb, 'mem_kb': osd.mem_kb,
                'arch': osd.arch, 'cpu': osd.cpu}
    operating_system = {'os': osd.os, 'version': osd.os_version,
                        'description': osd.os_desc, 'distro': osd.distro}
    return {
        'nw_info': network,
        'hw_info': hardware,
        'id': osd.osd_id,
        'os_info': operating_system,
        'ceph_version': osd.ceph_version,
    }
+
@jsonify.register(db.brag)
def jsonify_brag(b):
    """Serialize a full brag report (one version of one cluster)."""
    ownership = {
        'organization': b.ci.organization,
        'description': b.ci.description,
        'email': b.ci.contact_email,
        'name': b.ci.cluster_name,
    }
    return {
        'uuid': b.ci.uuid,
        'cluster_creation_date': str(b.ci.cluster_creation_date),
        'components_count': b.comps,
        # stored comma-joined in the DB; split back into a list
        'crush_types': b.comps.crush_types.split(','),
        'ownership': ownership,
        'pool_metadata': b.pools,
        'sysinfo': b.osds,
    }
--- /dev/null
- organization = Column(String)
- contact_email = Column(String)
- cluster_name = Column(String)
+import json
+from datetime import datetime
+from sqlalchemy.orm import sessionmaker, scoped_session
+from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
+from sqlalchemy import PrimaryKeyConstraint
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.ext.declarative import declared_attr
+
+Base = declarative_base()
+Session = scoped_session(sessionmaker())
+
class cluster_info(Base):
    # One row per registered cluster, keyed by its public UUID.
    __tablename__ = 'cluster_info'

    index = Column(Integer, primary_key=True)
    uuid = Column(String(36), unique=True)   # client-generated cluster UUID
    organization = Column(String(64))
    contact_email = Column(String(32))
    cluster_name = Column(String(32))
    cluster_creation_date = Column(DateTime)
    description = Column(String(32))
    num_versions = Column(Integer)           # number of uploaded report versions
+
class version_info(Base):
    # One uploaded brag report; numbered per cluster (see put_new_version).
    __tablename__ = 'version_info'

    index = Column(Integer, primary_key=True)
    cluster_id = Column(ForeignKey('cluster_info.index'))
    version_number = Column(Integer)   # 1-based, from cluster_info.num_versions
    version_date = Column(DateTime)    # server-side upload time (datetime.now())
+
class components_info(Base):
    # Cluster-wide daemon/object counts for one report version.
    __tablename__ = 'components_info'

    index = Column(Integer, primary_key=True)
    vid = Column(ForeignKey('version_info.index'))
    byte_count = Column(Integer)
    byte_scale = Column(String(8))     # unit string reported by the client, e.g. "bytes"
    num_osds = Column(Integer)
    num_objects = Column(Integer)
    num_pgs = Column(Integer)
    num_pools = Column(Integer)
    num_mdss = Column(Integer)
    num_mons = Column(Integer)
    crush_types = Column(String(256))  # comma-joined CRUSH type names
+
class pools_info(Base):
    # Per-pool metadata for one report version.
    __tablename__ = 'pools_info'

    index = Column(Integer, primary_key=True)
    vid = Column(ForeignKey('version_info.index'))
    pool_id = Column(String(8))        # pool id as reported (string, not int)
    pool_name = Column(String(16))
    pool_rep_size = Column(Integer)
+
class osds_info(Base):
    # Per-OSD hardware/OS/network details for one report version.
    __tablename__ = 'osds_info'

    index = Column(Integer, primary_key=True)
    vid = Column(ForeignKey('version_info.index'))
    osd_id = Column(String(8))
    nw_address = Column(String(16))    # NOTE(review): 16 chars is tight even
                                       # for IPv4 dotted-quad -- confirm limit
    hostname = Column(String(16))
    swap_kb = Column(Integer)
    mem_kb = Column(Integer)
    arch = Column(String(16))
    cpu = Column(String(16))
    os = Column(String(16))
    os_version = Column(String(16))
    os_desc = Column(String(64))
    distro = Column(String(64))
    ceph_version = Column(String(64))
+
class brag(object):
    # Aggregate view of one (uuid, version): cluster row (ci), version row
    # (vi), component counts (comps), plus pools and osds lists.
    def __init__(self, uuid, version_number):
        self.ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
        if self.ci is not None:
            self.vi = Session.query(version_info).filter_by(cluster_id=self.ci.index, version_number=version_number).first()

        # NOTE(review): when ci is None, self.vi is never assigned; callers
        # must check ci before touching vi (get_brag's short-circuit does).
        if self.ci is not None and self.vi is not None:
            self.comps = Session.query(components_info).filter_by(vid=self.vi.index).first()
            self.pools = Session.query(pools_info).filter_by(vid=self.vi.index).all()
            self.osds = Session.query(osds_info).filter_by(vid=self.vi.index).all()
+
def put_new_version(data):
    # Store one uploaded brag report (a JSON string) as a new version of the
    # cluster it names, creating the cluster row on first upload.
    # NOTE(review): only the new-cluster branch commits explicitly; the rest
    # appears to rely on the session's autoflush and an outer commit --
    # confirm against the controller code.
    info = json.loads(data)
    def add_cluster_info():
        # Fetch or create the cluster_info row; an existing cluster just
        # gets its version counter bumped.
        ci = Session.query(cluster_info).filter_by(uuid=info['uuid']).first()
        if ci is None:
            dt = datetime.strptime(info['cluster_creation_date'], "%Y-%m-%d %H:%M:%S.%f")
            ci = cluster_info(uuid=info['uuid'],
                              organization=info['ownership']['organization'],
                              contact_email=info['ownership']['email'],
                              cluster_name=info['ownership']['name'],
                              description=info['ownership']['description'],
                              cluster_creation_date=dt,
                              num_versions=1)
            Session.add(ci)
            Session.commit()
        else:
            ci.num_versions += 1

        return ci

    def add_version_info(ci):
        # New version row numbered with the cluster's current counter.
        vi = version_info(cluster_id=ci.index,
                          version_number=ci.num_versions,
                          version_date=datetime.now())
        Session.add(vi)
        return vi

    def add_components_info(vi):
        # Flatten the client's nested component counts into one row.
        comps_count = info['components_count']
        comps_info = components_info(vid=vi.index,
                                     byte_count=comps_count['bytes']['count'],
                                     byte_scale=comps_count['bytes']['scale'],
                                     num_osds=comps_count['osds'],
                                     num_objects=comps_count['objects'],
                                     num_pgs=comps_count['pgs'],
                                     num_pools=comps_count['pools'],
                                     num_mdss=comps_count['mdss'],
                                     num_mons=comps_count['mons'],
                                     crush_types=','.join(info['crush_types']))
        Session.add(comps_info)

    def add_pools_info(vi):
        # One row per pool in the report.
        pools = info['pool_metadata']
        for p in pools:
            Session.add(pools_info(vid=vi.index,
                                   pool_id=p['id'],
                                   pool_name=p['name'],
                                   pool_rep_size=p['rep_size']))

    def add_osds_info(vi):
        # One row per OSD in the report's sysinfo section.
        osds = info['sysinfo']
        for o in osds:
            osd = osds_info(vid=vi.index,
                            osd_id=o['id'],
                            nw_address=o['nw_info']['address'],
                            hostname=o['nw_info']['hostname'],
                            swap_kb=o['hw_info']['swap_kb'],
                            mem_kb=o['hw_info']['mem_kb'],
                            arch=o['hw_info']['arch'],
                            cpu=o['hw_info']['cpu'],
                            os=o['os_info']['os'],
                            os_version=o['os_info']['version'],
                            os_desc=o['os_info']['description'],
                            distro=o['os_info']['distro'],
                            ceph_version=o['ceph_version'])
            Session.add(osd)

    ci = add_cluster_info()
    add_version_info(ci)
    # Re-query rather than using add_version_info's return value --
    # presumably so the row's autogenerated index is populated before it
    # is used as a foreign key below; verify this matches session config.
    vi = Session.query(version_info).filter_by(cluster_id=ci.index,
                                               version_number=ci.num_versions).first()
    add_components_info(vi)
    add_pools_info(vi)
    add_osds_info(vi)
+
def delete_uuid(uuid):
    # Delete every stored version (and its child rows) for *uuid*.
    # Returns an error dict for unknown UUIDs, None on success.
    ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
    if ci is None:
        return {'status':400, 'msg':'No information for this UUID'}

    for v in Session.query(version_info).filter_by(cluster_id=ci.index).all():
        Session.query(components_info).filter_by(vid=v.index).delete()
        Session.query(pools_info).filter_by(vid=v.index).delete()
        Session.query(osds_info).filter_by(vid=v.index).delete()
        # flush the child deletions before removing the parent version row
        Session.flush()
        Session.delete(v)
        Session.flush()

    Session.delete(ci)
    return None
+
def get_uuids():
    """Return every registered cluster_info row."""
    return Session.query(cluster_info).all()
+
def get_versions(uuid):
    """Return all version_info rows for cluster *uuid*, or None if unknown."""
    cluster = Session.query(cluster_info).filter_by(uuid=uuid).first()
    if cluster is None:
        return None
    return Session.query(version_info).filter_by(cluster_id=cluster.index).all()
+
def get_brag(uuid, version_id):
    """Return a fully-loaded brag object, or None if uuid/version is unknown."""
    report = brag(uuid, version_id)
    if report.ci is None or report.vi is None:
        return None
    return report