--- /dev/null
- "num_bytes": 0,
+# Ceph-brag
+
+`ceph-brag` is going to be an anonymized cluster reporting tool designed to collect a "registry" of Ceph clusters for community knowledge.
+This data will be displayed on a public web page using UUID by default, but users can claim their cluster and publish information about ownership if they so desire.
+
+For more information please visit:
+
+* [Blueprint](http://wiki.ceph.com/Planning/Blueprints/Firefly/Ceph-Brag)
+* [CDS Etherpad](http://pad.ceph.com/p/cdsfirefly-ceph-brag)
+
+# Client
+
+## How to use:
+
+### Pre-requisites:
+ceph-brag uses the 'ceph' python script. Hence, before executing the ceph-brag script, ensure that the ceph services are all running and that the 'ceph' script is in the 'PATH' environment variable
+
+### Runtime instructions:
+Run 'ceph-brag -h' to get the usage information of this tool.
+
+### Sample output:
+
+ {
+ "cluster_creation_date": "2014-01-16 13:38:41.928551",
+ "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
+ "components_count": {
++ "num_data_bytes": 0,
++ "num_bytes_total": 1209312904,
+ "num_osds": 1,
+ "num_objects": 0,
+ "num_pgs": 192,
+ "num_pools": 3,
+ "num_mdss": 1,
+ "num_mons": 1
+ },
+      "crush_types": [
+        {
+          "type": "osd",
+          "count": 2
+        },
+        {
+          "type": "rack",
+          "count": 1
+        },
+        {
+          "type": "host",
+          "count": 1
+        },
+        {
+          "type": "root",
+          "count": 1
+        }
+      ],
+ "ownership": {
+ "organization": "eNovance",
+ "description": "Use case1",
+ "email": "mail@enovance.com",
+ "name": "Cluster1"
+ },
+ "pool_metadata": [
+ {
+ "size": 3,
+ "id": 0,
+ "type": 1
+ },
+ {
+ "size": 3,
+ "id": 1,
+ "type": 1
+ },
+ {
+ "size": 3,
+ "id": 2,
+          "type": 1
+ }
+ ],
+ "sysinfo": {
+ "kernel_types": [
+ {
+ "count": 1,
+ "type": "#36-Ubuntu SMP Tue Apr 10 22:29:03 UTC 2012"
+ }
+ ],
+ "cpu_archs": [
+ {
+ "count": 1,
+ "arch": "x86_64"
+ }
+ ],
+ "cpus": [
+ {
+ "count": 1,
+ "cpu": "Intel Xeon E312xx (Sandy Bridge)"
+ }
+ ],
+ "kernel_versions": [
+ {
+ "count": 1,
+ "version": "3.2.0-23-virtual"
+ }
+ ],
+ "ceph_versions": [
+ {
+ "count": 1,
+ "version": "0.75-229-g4050eae(4050eae32cd77a1c210ca11d0f12c74daecb1bd3)"
+ }
+ ],
+ "os_info": [
+ {
+ "count": 1,
+ "os": "Linux"
+ }
+ ],
+ "distros": [
+ {
+ "count": 1,
+ "distro": "Ubuntu 12.04 precise (Ubuntu 12.04 LTS)"
+ }
+ ]
+ }
+ }
+
+
+# Server
+
+## Info
+The ceph-brag server code is a python based web application.
+
+## How to use
+
+### Prerequisites
+* [pecan](http://pecanpy.org) is the web framework that is used by this application.
+* [sqlalchemy](http://www.sqlalchemy.org) is the ORM that is used by this application
+
+### How to deploy
+* [Common recipes to deploy](http://pecan.readthedocs.org/en/latest/deployment.html#common-recipes)
+* Modify server/config.py:sqlalchemy['url'] to point the correct database connection
+
+## URLs
+The following are the REST URLs that are implemented, with 'url-prefix' being the mount point for the WSGI script
+
+### GET
+
+##### * GET /url-prefix/
+Returns the list of clusters that are registered so far.
+Outputs - On success application/json of the following format is returned
+
+ [
+ {
+ "num_versions": 3,
+ "cluster_creation_date": "2014-01-16 13:38:41.928551",
+ "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
+ "cluster_name": "Cluster1",
+ "organization": "eNovance",
+ "email": "mail@enovance.com"
+ },
+ ...
+ ]
+
+##### * GET /url-prefix/UUID
+Returns the list of version information for a particular UUID.
+Outputs - On success application/json of the following format is returned
+
+ [
+ {
+ "version_number": 1,
+ "version_date": "2014-02-10 10:17:56.283499"
+ },
+ ...
+ ]
+
+##### * GET /url-prefix/UUID/version\_number
+Returns the entire brag report as mentioned in client's sample output for a particular version of a UUID
+
+### PUT
+
+##### * PUT /url-prefix
+Uploads the brag report and creates a new version for the UUID mentioned in the payload
+
+### DELETE
+
+##### * DELETE /url-prefix?uuid=xxxx
+Deletes all the versions of a cluster whose UUID is sent as a parameter
+
+
--- /dev/null
- def get_cluster_creation_date():
- (rc, o, e) = run_command(['ceph', 'mon', 'dump', '-f', 'json'])
- if rc is not 0:
- raise RuntimeError("\'ceph mon dump\' failed - " + e)
-
- oj = json.loads(o)
- return oj['created']
-
+#!/usr/bin/env python
+
+import subprocess
+import uuid
+import re
+import json
+import sys
+import ast
+import requests
+from collections import Counter
+
+CLUSTER_UUID_NAME='cluster-uuid'
+CLUSTER_OWNERSHIP_NAME='cluster-ownership'
+
+def run_command(cmd):
+ child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (o, e) = child.communicate()
+ return (child.returncode, o, e)
+
+def get_uuid():
+ (rc,uid,e) = run_command(['ceph', 'config-key', 'get', CLUSTER_UUID_NAME])
+ if rc is not 0:
+ #uuid is not yet set.
+ uid = str(uuid.uuid4())
+ (rc, o, e) = run_command(['ceph', 'config-key', 'put',
+ CLUSTER_UUID_NAME, uid])
+ if rc is not 0:
+ raise RuntimeError("\'ceph config-key put\' failed -" + e)
+
+ return uid
+
- num_bytes = pgmap['data_bytes']
+def bytes_pretty_to_raw(byte_count, byte_scale):
+ if byte_scale == 'kB':
+ return byte_count >> 10
+ if byte_scale == 'MB':
+ return byte_count >> 20
+ if byte_scale == 'GB':
+ return byte_count >> 30
+ if byte_scale == 'TB':
+ return byte_count >> 40
+ if byte_scale == 'PB':
+ return byte_count >> 50
+ if byte_scale == 'EB':
+ return byte_count >> 60
+
+ return byte_count
+
+def get_nums():
+ (rc, o, e) = run_command(['ceph', '-s', '-f', 'json'])
+ if rc is not 0:
+ raise RuntimeError("\'ceph -s\' failed - " + e)
+
+ oj = json.loads(o)
+ num_mons = len(oj['monmap']['mons'])
+ num_osds = int(oj['osdmap']['osdmap']['num_in_osds'])
+ num_mdss = oj['mdsmap']['in']
+
+ pgmap = oj['pgmap']
+ num_pgs = pgmap['num_pgs']
- 'num_bytes':num_bytes,
++ num_data_bytes = pgmap['data_bytes']
++ num_bytes_total = pgmap['bytes_total']
+
+ (rc, o, e) = run_command(['ceph', 'pg', 'dump', 'pools', '-f', 'json-pretty'])
+ if rc is not 0:
+ raise RuntimeError("\'ceph pg dump pools\' failed - " + e)
+
+ pools = json.loads(o)
+ num_pools = len(pools)
+ num_objs = 0
+ for p in pools:
+ num_objs += p['stat_sum']['num_objects']
+
+ nums = {'num_mons':num_mons,
+ 'num_osds':num_osds,
+ 'num_mdss':num_mdss,
+ 'num_pgs':num_pgs,
- buckets = {}
- items_list = []
++ 'num_data_bytes':num_data_bytes,
++ 'num_bytes_total':num_bytes_total,
+ 'num_pools':num_pools,
+ 'num_objects':num_objs}
+ return nums
+
+def get_crush_types():
+ (rc, o, e) = run_command(['ceph', 'osd', 'crush', 'dump'])
+ if rc is not 0:
+ raise RuntimeError("\'ceph osd crush dump\' failed - " + e)
+
+ crush_dump = json.loads(o)
+ if crush_dump['types'] is None:
+ raise RuntimeError("\'types\' item missing in \'ceph osd crush dump\'")
+
+ crush_types = {}
+ for t in crush_dump['types']:
+ crush_types[t['type_id']] = t['name']
+
- buckets[bucket['id']] = bucket['type_id']
- for item in bucket['items']:
- items_list.append(item['id'])
++ types_list = []
+ for bucket in crush_dump['buckets']:
- counter = Counter(items_list)
++ types_list.append(bucket['type_id'])
+
+ crush_map = []
- for id,count in counter.items():
- if id in buckets:
- append(crush_types[buckets[id]],
++ types_counter = Counter(types_list)
+ append = lambda t,c: crush_map.append({'type':t, 'count':c})
- del buckets[id]
- else:
- append(crush_types[id], count)
++ for id,count in types_counter.items():
++ append(crush_types[id],
+ count)
- #the root item
- for id,type_id in buckets.items():
- append(crush_types[type_id], 1)
+
- def get_pool_metadata():
++ if 'devices' in crush_dump:
++ append('devices', len(crush_dump['devices']))
+
+ return crush_map
+
- return pool_meta
++def get_osd_dump_info():
+ (rc, o, e) = run_command(['ceph', 'osd', 'dump', '-f', 'json'])
+ if rc is not 0:
+ raise RuntimeError("\'ceph osd dump\' failed - " + e)
+
+ pool_meta = []
+ oj = json.loads(o)
+ proc = lambda x: {'id':x['pool'], 'type':x['type'], 'size':x['size']}
+ for p in oj['pools']:
+ pool_meta.append(proc(p))
+
- out['cluster_creation_date'] = get_cluster_creation_date()
++ return oj['created'], pool_meta
+
+def get_sysinfo(max_osds):
+ count = 0
+ osd_metadata_available = False
+
+ os = {}
+ kern_version = {}
+ kern_description = {}
+ distro = {}
+ cpu = {}
+ arch = {}
+ ceph_version = {}
+
+ incr = lambda a,k: 1 if k not in a else a[k]+1
+ while count < max_osds:
+ meta = {'id':count}
+ (rc, o, e) = run_command(['ceph', 'osd', 'metadata', str(count)])
+ if rc is 0:
+ if osd_metadata_available is False:
+ osd_metadata_available = True
+
+ jmeta = json.loads(o)
+
+ version = jmeta['ceph_version'].split()
+ cv = version[2]
+ if (len(version) > 3):
+ cv += version[3]
+
+ ceph_version[cv] = incr(ceph_version, cv)
+ os[jmeta['os']] = incr(os, jmeta['os'])
+ kern_version[jmeta['kernel_version']] = \
+ incr(kern_version, jmeta['kernel_version'])
+ kern_description[jmeta['kernel_description']] = \
+ incr(kern_description, jmeta['kernel_description'])
+
+ try:
+ dstr = jmeta['distro'] + ' '
+ dstr += jmeta['distro_version'] + ' '
+ dstr += jmeta['distro_codename'] + ' ('
+ dstr += jmeta['distro_description'] + ')'
+ distro[dstr] = incr(distro, dstr)
+ except KeyError as ke:
+ pass
+
+ cpu[jmeta['cpu']] = incr(cpu, jmeta['cpu'])
+ arch[jmeta['arch']] = incr(arch, jmeta['arch'])
+
+ count = count + 1
+
+ sysinfo = {}
+ if osd_metadata_available is False:
+ print >> sys.stderr, "'ceph osd metadata' is not available at all"
+ return sysinfo
+
+ def jsonify(type_count, name, type_name):
+ tmp = []
+ for k, v in type_count.items():
+ tmp.append({type_name:k, 'count':v})
+ sysinfo[name] = tmp
+
+ jsonify(os, 'os_info', 'os')
+ jsonify(kern_version, 'kernel_versions', 'version')
+ jsonify(kern_description, 'kernel_types', 'type')
+ jsonify(distro, 'distros', 'distro')
+ jsonify(cpu, 'cpus', 'cpu')
+ jsonify(arch, 'cpu_archs', 'arch')
+ jsonify(ceph_version, 'ceph_versions', 'version')
+ return sysinfo
+
+def get_ownership_info():
+ (rc, o, e) = run_command(['ceph', 'config-key', 'get',
+ CLUSTER_OWNERSHIP_NAME])
+ if rc is not 0:
+ return {}
+
+ return ast.literal_eval(o)
+
+def output_json():
+ out = {}
+ url = None
+
+ out['uuid'] = get_uuid()
- out['pool_metadata'] = get_pool_metadata()
+ nums = get_nums()
+ num_osds = int(nums['num_osds'])
+ out['components_count'] = nums
+ out['crush_types'] = get_crush_types()
++ out['cluster_creation_date'], out['pool_metadata'] = get_osd_dump_info()
+ out['sysinfo'] = get_sysinfo(num_osds)
+
+ owner = get_ownership_info()
+ if owner is not None:
+ out['ownership'] = owner
+ if 'url' in owner:
+ url = owner.pop('url')
+
+ return json.dumps(out, indent=2, separators=(',', ': ')), url
+
+def describe_usage():
+ print >> sys.stderr, "Usage:"
+ print >> sys.stderr, "======\n"
+
+ print >> sys.stderr, sys.argv[0] + " <commands> [command-options]\n"
+ print >> sys.stderr, "commands:"
+ print >> sys.stderr, "publish - publish the brag report to the server"
+ print >> sys.stderr, "update-metadata <update-metadata-options> - Update"
+ print >> sys.stderr, " ownership information for bragging"
+ print >> sys.stderr, "clear-metadata - Clear information set by update-metadata"
+ print >> sys.stderr, "unpublish --yes-i-am-shy - delete the brag report from the server"
+ print >> sys.stderr, ""
+
+ print >> sys.stderr, "update-metadata options:"
+ print >> sys.stderr, "--name= - Name of the cluster"
+ print >> sys.stderr, "--organization= - Name of the organization"
+ print >> sys.stderr, "--email= - Email contact address"
+ print >> sys.stderr, "--description= - Reporting use-case"
+ print >> sys.stderr, "--url= - The URL that is used to publish and unpublish"
+ print >> sys.stderr, ""
+
+def update_metadata():
+ info = {}
+ possibles = ['name', 'organization', 'email', 'description', 'url']
+
+ #get the existing values
+ info = get_ownership_info();
+
+ for index in range(2, len(sys.argv)):
+ mo = re.search("--(\S+)=(.*)", sys.argv[index])
+ if not mo:
+ describe_usage()
+ return 22
+
+ k = mo.group(1)
+ v = mo.group(2)
+
+ if k in possibles:
+ info[k] = v
+ else:
+ print >> sys.stderr, "Unexpect option --" + k
+ describe_usage()
+ return 22
+
+ (rc, o, e) = run_command(['ceph', 'config-key', 'put',
+ CLUSTER_OWNERSHIP_NAME, str(info)])
+ return rc
+
+def clear_metadata():
+ (rc, o, e) = run_command(['ceph', 'config-key', 'del',
+ CLUSTER_OWNERSHIP_NAME])
+ return rc
+
+def publish():
+ data, url = output_json()
+ if url is None:
+ print >> sys.stderr, "Cannot publish until a URL is set using update-metadata"
+ return 1
+
+ req = requests.put(url, data=data)
+ if req.status_code is not 201:
+ print >> sys.stderr, "Failed to publish, server responded with code " + str(req.status_code)
+ print >> sys.stderr, req.text
+ return 1
+
+ return 0
+
+def unpublish():
+ if len(sys.argv) <= 2 or sys.argv[2] != '--yes-i-am-shy':
+ print >> sys.stderr, "unpublish should be followed by --yes-i-am-shy"
+ return 22
+
+ fail = False
+ owner = get_ownership_info()
+ if owner is None:
+ fail = True
+ try:
+ url = owner['url']
+ except KeyError as e:
+ fail = True
+
+ if fail:
+ print >> sys.stderr, "URL is not updated yet"
+ return 1
+
+ uuid = get_uuid()
+
+ params = {'uuid':uuid}
+ req = requests.delete(url, params=params)
+ if req.status_code is not 200:
+ print >> sys.stderr, "Failed to unpublish, server responsed with code " + str(req.status_code)
+ return 1
+
+ return 0
+
+def main():
+ if len(sys.argv) is 1:
+ print output_json()[0]
+ return 0
+ elif sys.argv[1] == 'update-metadata':
+ return update_metadata()
+ elif sys.argv[1] == 'clear-metadata':
+ return clear_metadata()
+ elif sys.argv[1] == 'publish':
+ return publish()
+ elif sys.argv[1] == 'unpublish':
+ return unpublish()
+ else:
+ describe_usage()
+ return 22
+
+if __name__ == '__main__':
+ sys.exit(main())
--- /dev/null
- num_bytes=comps.num_bytes,
+from pecan.jsonify import jsonify
+from ceph_brag.model import db
+
+@jsonify.register(db.version_info)
+def jsonify_version(vi):
+ return dict(
+ version_number=vi.version_number,
+ version_date=str(vi.version_date)
+ )
+
+@jsonify.register(db.cluster_info)
+def jsonify_cluster_info(ci):
+ return dict(
+ uuid=ci.uuid,
+ organization=ci.organization,
+ email=ci.contact_email,
+ cluster_name=ci.cluster_name,
+ cluster_creation_date=str(ci.cluster_creation_date),
+ num_versions=ci.num_versions
+ )
+
+@jsonify.register(db.components_info)
+def jsonify_components_info(comps):
+ return dict(
++ num_data_bytes=comps.num_data_bytes,
++ num_bytes_total=comps.num_bytes_total,
+ num_osds=comps.num_osds,
+ num_objects=comps.num_objects,
+ num_pgs=comps.num_pgs,
+ num_pools=comps.num_pools,
+ num_mdss=comps.num_mdss,
+ num_mons=comps.num_mons)
+
+@jsonify.register(db.crush_types)
+def jsonify_crush_types(crush):
+ return dict(type=crush.crush_type,
+ count=crush.crush_count)
+
+@jsonify.register(db.pools_info)
+def jsonify_pools_info(pool):
+ return dict(size=pool.pool_rep_size,
+ type=pool.pool_type,
+ id=pool.pool_id)
+
+@jsonify.register(db.os_info)
+def jsonify_os_info(value):
+ return dict(os=value.os,
+ count=value.count)
+
+@jsonify.register(db.kernel_versions)
+def jsonify_kernel_versions(value):
+ return dict(version=value.version,
+ count=value.count)
+
+@jsonify.register(db.kernel_types)
+def jsonify_kernel_types(value):
+ return dict(type=value.type,
+ count=value.count)
+
+@jsonify.register(db.distros)
+def jsonify_distros(value):
+ return dict(distro=value.distro,
+ count=value.count)
+
+@jsonify.register(db.cpus)
+def jsonify_cpus(value):
+ return dict(cpu=value.cpu,
+ count=value.count)
+
+@jsonify.register(db.cpu_archs)
+def jsonify_cpu_archs(value):
+ return dict(arch=value.arch,
+ count=value.count)
+
+@jsonify.register(db.ceph_versions)
+def jsonify_ceph_versions(value):
+ return dict(version=value.version,
+ count=value.count)
+
+@jsonify.register(db.sysinfo)
+def jsonify_sysinfo(value):
+ retval = {}
+
+ if value.os:
+ retval['os_info'] = value.os
+ if value.kern_vers:
+ retval['kernel_versions'] = value.kern_vers
+ if value.kern_types:
+ retval['kernel_types'] = value.kern_types
+ if value.distros:
+ retval['distros'] = value.distros
+ if value.cpus:
+ retval['cpus'] = value.cpus
+ if value.cpu_archs:
+ retval['cpu_archs'] = value.cpu_archs
+ if value.ceph_vers:
+ retval['ceph_versions'] = value.ceph_vers
+
+ return retval
+
+@jsonify.register(db.brag)
+def jsonify_brag(b):
+ ownership = {'organization':b.ci.organization,
+ 'description':b.ci.description,
+ 'email':b.ci.contact_email,
+ 'name':b.ci.cluster_name
+ }
+ return dict(uuid=b.ci.uuid,
+ cluster_creation_date=str(b.ci.cluster_creation_date),
+ components_count=b.comps,
+ crush_types=b.crush,
+ ownership=ownership,
+ pool_metadata=b.pools,
+ sysinfo=b.sysinfo
+ )
--- /dev/null
- num_bytes = Column(BigInteger)
+import json
+from datetime import datetime
+from sqlalchemy.orm import sessionmaker, scoped_session
+from sqlalchemy import Column, Integer, String, \
+ DateTime, ForeignKey, BigInteger
+from sqlalchemy import PrimaryKeyConstraint
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.ext.declarative import declared_attr
+
+Base = declarative_base()
+Session = scoped_session(sessionmaker())
+
+class cluster_info(Base):
+ __tablename__ = 'cluster_info'
+
+ index = Column(Integer, primary_key=True)
+ uuid = Column(String(36), unique=True)
+ organization = Column(String(64))
+ contact_email = Column(String(32))
+ cluster_name = Column(String(32))
+ cluster_creation_date = Column(DateTime)
+ description = Column(String(32))
+ num_versions = Column(Integer)
+
+class version_info(Base):
+ __tablename__ = 'version_info'
+
+ index = Column(Integer, primary_key=True)
+ cluster_id = Column(ForeignKey('cluster_info.index'))
+ version_number = Column(Integer)
+ version_date = Column(DateTime)
+
+class components_info(Base):
+ __tablename__ = 'components_info'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
- num_bytes=comps_count['num_bytes'],
++ num_data_bytes = Column(BigInteger)
++ num_bytes_total = Column(BigInteger)
+ num_osds = Column(Integer)
+ num_objects = Column(Integer)
+ num_pgs = Column(Integer)
+ num_pools = Column(Integer)
+ num_mdss = Column(Integer)
+ num_mons = Column(Integer)
+
+class crush_types(Base):
+ __tablename__ = 'crush_types'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ crush_type = Column(String(16))
+ crush_count = Column(Integer)
+
+class pools_info(Base):
+ __tablename__ = 'pools_info'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ pool_id = Column(Integer)
+ pool_type = Column(Integer)
+ pool_rep_size = Column(Integer)
+
+class os_info(Base):
+ __tablename__ = 'os_info'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ os = Column(String(16))
+ count = Column(Integer)
+
+class kernel_versions(Base):
+ __tablename__ = 'kernel_versions'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ version = Column(String(16))
+ count = Column(Integer)
+
+class kernel_types(Base):
+ __tablename__ = 'kernel_types'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ type = Column(String(64))
+ count = Column(Integer)
+
+class distros(Base):
+ __tablename__ = 'distros'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ distro = Column(String(64))
+ count = Column(Integer)
+
+class cpus(Base):
+ __tablename__ = 'cpus'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ cpu = Column(String(16))
+ count = Column(Integer)
+
+class cpu_archs(Base):
+ __tablename__ = 'cpu_archs'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ arch = Column(String(16))
+ count = Column(Integer)
+
+class ceph_versions(Base):
+ __tablename__ = 'ceph_versions'
+
+ index = Column(Integer, primary_key=True)
+ vid = Column(ForeignKey('version_info.index'))
+ version = Column(String(16))
+ count = Column(Integer)
+
+class sysinfo(object):
+ def __init__(self, vindex):
+ self.os = Session.query(os_info).filter_by(vid=vindex).all()
+ self.kern_vers = Session.query(kernel_versions).filter_by(vid=vindex).all()
+ self.kern_types = Session.query(kernel_types).filter_by(vid=vindex).all()
+ self.distros = Session.query(distros).filter_by(vid=vindex).all()
+ self.cpus = Session.query(cpus).filter_by(vid=vindex).all()
+ self.cpu_archs = Session.query(cpu_archs).filter_by(vid=vindex).all()
+ self.ceph_vers = Session.query(ceph_versions).filter_by(vid=vindex).all()
+
+class brag(object):
+ def __init__(self, uuid, version_number):
+ self.ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
+ if self.ci is not None:
+ self.vi = Session.query(version_info).filter_by(cluster_id=self.ci.index, version_number=version_number).first()
+
+ if self.ci is not None and self.vi is not None:
+ self.comps = Session.query(components_info).filter_by(vid=self.vi.index).first()
+ self.crush = Session.query(crush_types).filter_by(vid=self.vi.index).all()
+ self.pools = Session.query(pools_info).filter_by(vid=self.vi.index).all()
+ self.sysinfo = sysinfo(self.vi.index)
+
+def put_new_version(data):
+ info = json.loads(data)
+ def add_cluster_info():
+ ci = Session.query(cluster_info).filter_by(uuid=info['uuid']).first()
+ if ci is None:
+ dt = datetime.strptime(info['cluster_creation_date'], "%Y-%m-%d %H:%M:%S.%f")
+ ci = cluster_info(uuid=info['uuid'],
+ organization=info['ownership']['organization'],
+ contact_email=info['ownership']['email'],
+ cluster_name=info['ownership']['name'],
+ description=info['ownership']['description'],
+ cluster_creation_date=dt,
+ num_versions=1)
+ Session.add(ci)
+ Session.commit()
+ else:
+ ci.num_versions += 1
+
+ return ci
+
+ def add_version_info(ci):
+ vi = version_info(cluster_id=ci.index,
+ version_number=ci.num_versions,
+ version_date=datetime.now())
+ Session.add(vi)
+ return vi
+
+ def add_components_info(vi):
+ comps_count= info['components_count']
+ comps_info = components_info(vid=vi.index,
++ num_data_bytes=comps_count['num_data_bytes'],
++ num_bytes_total=comps_count['num_bytes_total'],
+ num_osds=comps_count['num_osds'],
+ num_objects=comps_count['num_objects'],
+ num_pgs=comps_count['num_pgs'],
+ num_pools=comps_count['num_pools'],
+ num_mdss=comps_count['num_mdss'],
+ num_mons=comps_count['num_mons'])
+ Session.add(comps_info)
+
+ def add_crush_types(vi):
+ for c in info['crush_types']:
+ Session.add(crush_types(vid=vi.index,
+ crush_type=c['type'],
+ crush_count=c['count']))
+
+ def add_pools_info(vi):
+ pools = info['pool_metadata']
+ for p in pools:
+ Session.add(pools_info(vid=vi.index,
+ pool_id=p['id'],
+ pool_type=p['type'],
+ pool_rep_size=p['size']))
+
+ def add_sys_info(vi):
+ si = info['sysinfo']
+ while si:
+ k,v = si.popitem()
+ if k == 'os_info':
+ for o in v:
+ Session.add(os_info(vid=vi.index,
+ os=o['os'],
+ count=o['count']))
+ elif k == 'kernel_versions':
+ for k in v:
+ Session.add(kernel_versions(vid=vi.index,
+ version=k['version'],
+ count=k['count']))
+ elif k == 'kernel_types':
+ for k in v:
+ Session.add(kernel_types(vid=vi.index,
+ type=k['type'],
+ count=k['count']))
+ elif k == 'distros':
+ for d in v:
+ Session.add(distros(vid=vi.index,
+ distro=d['distro'],
+ count=d['count']))
+ elif k == 'cpus':
+ for c in v:
+ Session.add(cpus(vid=vi.index,
+ cpu=c['cpu'],
+ count=c['count']))
+ elif k == 'cpu_archs':
+ for c in v:
+ Session.add(cpu_archs(vid=vi.index,
+ arch=c['arch'],
+ count=c['count']))
+ elif k == 'ceph_versions':
+ for c in v:
+ Session.add(ceph_versions(vid=vi.index,
+ version=c['version'],
+ count=c['count']))
+
+ ci = add_cluster_info()
+ add_version_info(ci)
+ vi = Session.query(version_info).filter_by(cluster_id=ci.index,
+ version_number=ci.num_versions).first()
+ add_components_info(vi)
+ add_crush_types(vi)
+ add_pools_info(vi)
+ add_sys_info(vi)
+
+def delete_uuid(uuid):
+ ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
+ if ci is None:
+ return {'status':400, 'msg':'No information for this UUID'}
+
+ for v in Session.query(version_info).filter_by(cluster_id=ci.index).all():
+ Session.query(components_info).filter_by(vid=v.index).delete()
+ Session.query(crush_types).filter_by(vid=v.index).delete()
+ Session.query(pools_info).filter_by(vid=v.index).delete()
+ Session.query(os_info).filter_by(vid=v.index).delete()
+ Session.query(kernel_versions).filter_by(vid=v.index).delete()
+ Session.query(kernel_types).filter_by(vid=v.index).delete()
+ Session.query(distros).filter_by(vid=v.index).delete()
+ Session.query(cpus).filter_by(vid=v.index).delete()
+ Session.query(cpu_archs).filter_by(vid=v.index).delete()
+ Session.query(ceph_versions).filter_by(vid=v.index).delete()
+
+ Session.flush()
+ Session.delete(v)
+ Session.flush()
+
+ Session.delete(ci)
+ return None
+
+def get_uuids():
+ return Session.query(cluster_info).all()
+
+def get_versions(uuid):
+ ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
+ if ci is None:
+ return None
+
+ return Session.query(version_info).filter_by(cluster_id=ci.index).all()
+
+def get_brag(uuid, version_id):
+ b = brag(uuid, version_id)
+ if b.ci is None or b.vi is None:
+ return None
+
+ return b