--- /dev/null
--- /dev/null
++# Ceph-brag
++
++`ceph-brag` is going to be an anonymized cluster reporting tool designed to collect a "registry" of Ceph clusters for community knowledge.
++This data will be displayed on a public web page using UUID by default, but users can claim their cluster and publish information about ownership if they so desire.
++
++For more information please visit:
++
++* [Blueprint](http://wiki.ceph.com/Planning/Blueprints/Firefly/Ceph-Brag)
++* [CDS Etherpad](http://pad.ceph.com/p/cdsfirefly-ceph-brag)
++
++# Client
++
++## How to use:
++
++### Pre-requisites:
++ceph-brag uses the 'ceph' python script. Hence, before executing the ceph-brag script, ensure that the ceph services are running and that the 'ceph' script is in the 'PATH' environment variable.
++
++### Runtime instructions:
++Run 'ceph-brag -h' to get the usage information of this tool.
++
++### Sample output:
++
++ {
++ "cluster_creation_date": "2014-01-16 13:38:41.928551",
++ "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
++ "components_count": {
++ "bytes": {
++ "count": 0,
++ "scale": "bytes"
++ },
++ "osds": 1,
++ "objects": 0,
++ "pgs": 192,
++ "pools": 3,
++ "mdss": 1,
++ "mons": 1
++ },
++ "crush_types": [
++ "osd",
++ "host",
++ "chassis",
++ "rack",
++ "row",
++ "pdu",
++ "pod",
++ "room",
++ "datacenter",
++ "region",
++ "root"
++ ],
++ "ownership": {
++ "organization": "eNovance",
++ "description": "Use case1",
++ "email": "mail@enovance.com",
++ "name": "Cluster1"
++ },
++ "pool_metadata": [
++ {
++ "rep_size": 3,
++ "id": "0",
++ "name": "data"
++ },
++ {
++ "rep_size": 3,
++ "id": "1",
++ "name": "metadata"
++ },
++ {
++ "rep_size": 3,
++ "id": "2",
++ "name": "rbd"
++ }
++ ],
++ "sysinfo": [
++ {
++ "nw_info": {
++ "hostname": "ceph-brag",
++ "address": "127.0.0.1"
++ },
++ "hw_info": {
++ "swap_kb": 0,
++ "arch": "x86_64",
++ "cpu": "Intel Xeon E312xx (Sandy Bridge)",
++ "mem_kb": 2051648
++ },
++ "id": 0,
++ "os_info": {
++ "version": "3.2.0-23-virtual",
++ "os": "Linux",
++ "description": "#36-Ubuntu SMP Tue Apr 10 22:29:03 UTC 2012",
++ "distro": "Ubuntu 12.04 precise (Ubuntu 12.04 LTS)"
++ },
++ "ceph_version": "ceph version 0.75-229-g4050eae (4050eae32cd77a1c210ca11d0f12c74daecb1bd3)"
++ }
++ ]
++ }
++
++
++# Server
++
++## Info
++The ceph-brag server code is a python based web application.
++
++## How to use
++
++### Prerequisites
++* [pecan](http://pecanpy.org) is the web framework that is used by this application.
++* [sqlalchemy](http://www.sqlalchemy.org) is the ORM that is used by this application
++
++### How to deploy
++* [Common recipes to deploy](http://pecan.readthedocs.org/en/latest/deployment.html#common-recipes)
++
++## URLs
++The following REST URLs are implemented, with 'url-prefix' being the mount point for the WSGI script.
++
++### GET
++
++##### * GET /url-prefix/
++Returns the list of clusters that are registered so far.
++Outputs - On success application/json of the following format is returned
++
++ [
++ {
++ "num_versions": 3,
++ "cluster_creation_date": "2014-01-16 13:38:41.928551",
++ "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
++ "cluster_name": "Cluster1",
++ "organization": "eNovance",
++ "email": "mail@enovance.com"
++ },
++ ...
++ ]
++
++##### * GET /url-prefix/UUID
++Returns the list of version information for a particular UUID.
++Outputs - On success application/json of the following format is returned
++
++ [
++ {
++ "version_number": 1,
++ "version_date": "2014-02-10 10:17:56.283499",
++ "version_id": 10
++ },
++ ...
++ ]
++
++##### * GET /url-prefix/UUID/version\_number
++Returns the entire brag report as mentioned in client's sample output for a particular version of a UUID
++
++### PUT
++
++##### * PUT /url-prefix
++Uploads the brag report and creates a new version for the UUID mentioned in the payload
++
++### DELETE
++
++##### * DELETE /url-prefix?uuid=xxxx
++Deletes all the versions of a cluster whose UUID is sent as a parameter
++
++
--- /dev/null
--- /dev/null
++#!/usr/bin/env python
++
++import subprocess
++import uuid
++import re
++import json
++import sys
++import ast
++import requests
++
++CLUSTER_UUID_NAME='cluster-uuid'
++CLUSTER_OWNERSHIP_NAME='cluster-ownership'
++
def run_command(cmd):
    """Run *cmd* (an argv list) and capture its output.

    Returns a (returncode, stdout, stderr) tuple.
    """
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return (proc.returncode, out, err)
++
def get_uuid():
    """Return the persistent cluster UUID stored under CLUSTER_UUID_NAME.

    If no UUID has been stored yet, generate a random one and persist it
    with 'ceph config-key put'.

    Raises RuntimeError when the UUID cannot be stored.
    """
    (rc, uid, e) = run_command(['ceph', 'config-key', 'get', CLUSTER_UUID_NAME])
    if rc != 0:  # BUG FIX: was 'rc is not 0' (identity comparison on int)
        # UUID is not yet set: generate one and persist it in the cluster.
        uid = str(uuid.uuid4())
        # NOTE(review): on the 'get' path uid is returned exactly as the
        # command printed it; confirm it carries no trailing newline.
        (rc, o, e) = run_command(['ceph', 'config-key', 'put',
                                  CLUSTER_UUID_NAME, uid])
        if rc != 0:
            raise RuntimeError("'ceph config-key put' failed -" + e)

    return uid
++
def get_cluster_creation_date():
    """Return the cluster creation date parsed from 'ceph mon dump',
    falling back to 'ceph osd dump'.

    Returns None (after logging to stderr) when neither map carries a
    usable creation date. Raises RuntimeError when a ceph command fails.
    """
    (rc, o, e) = run_command(['ceph', 'mon', 'dump'])
    if rc != 0:  # BUG FIX: was 'rc is not 0'
        raise RuntimeError("'ceph mon dump' failed - " + e)

    rec = re.compile(r'(.*created\ )(.*)(\n.*)')

    mo = rec.search(o)
    # '0.000000' is the unset sentinel in the monmap.
    if mo and mo.group(2) != '0.000000':
        return mo.group(2)

    # Try and get the date from osd dump
    (rc, o, e) = run_command(['ceph', 'osd', 'dump'])
    if rc != 0:
        raise RuntimeError("'ceph osd dump' failed - " + e)

    mo = rec.search(o)
    if not mo or mo.group(2) == '0.000000':
        # BUG FIX: the original fell through to mo.group(2) and crashed
        # with AttributeError when mo was None.
        sys.stderr.write("Unable to get cluster creation date\n")
        return None

    return mo.group(2)
++
def get_nums():
    """Parse 'ceph -s' and return a dict of component counts:
    mons, osds, mdss, pgs, pools, bytes {count, scale}, objects.

    Raises RuntimeError when 'ceph -s' fails or a required map line is
    missing from its output.
    """
    (rc, o, e) = run_command(['ceph', '-s'])
    if rc != 0:  # BUG FIX: was 'rc is not 0'
        raise RuntimeError("'ceph -s' failed - " + e)

    mo = re.search(r'(.*monmap\ .*:\ )(\d+)(.*)', o)
    if not mo:
        raise RuntimeError("Unmatched pattern for monmap in 'ceph status'")
    num_mons = int(mo.group(2))

    # NOTE(review): the greedy '.*' before each capture group lets every
    # group match as little as one digit, so multi-digit counts may be
    # truncated -- verify this pattern against real 'ceph -s' output.
    mo = re.search(r'.*osdmap.*(\d+).*(\d+).*(\d+).*', o)
    if not mo:
        raise RuntimeError("Unmatched pattern for osdmap in 'ceph status'")
    num_osds = int(mo.group(1))

    # The mdsmap line is optional (absent when CephFS is not in use).
    num_mdss = 0
    mo = re.search(r'.*mdsmap\ e\d+.*(\d+)/(\d+)/(\d+).*', o)
    if mo:
        num_mdss = int(mo.group(2))

    mo = re.search(r'.*pgmap\ v\d+:\ (\d+).*,\ (\d+).*,\ (\d+)\ (\S+)\ data,\ (\d+).*', o)
    if not mo:
        raise RuntimeError("Unmatched pattern for pgmap in 'ceph status'")

    return {'mons': num_mons,
            'osds': num_osds,
            'mdss': num_mdss,
            'pgs': int(mo.group(1)),
            'pools': int(mo.group(2)),
            'bytes': {'count': int(mo.group(3)), 'scale': mo.group(4)},
            'objects': int(mo.group(5))}
++
def get_crush_types():
    """Return the list of CRUSH bucket type names from
    'ceph osd crush dump'.

    Raises RuntimeError when the command fails or the dump carries no
    'types' entry.
    """
    (rc, o, e) = run_command(['ceph', 'osd', 'crush', 'dump'])
    if rc != 0:  # BUG FIX: was 'rc is not 0'
        raise RuntimeError("'ceph osd crush dump' failed - " + e)

    crush_dump = json.loads(o)
    # BUG FIX: use .get() -- indexing a missing key raises KeyError,
    # it never yields None, so the original check could not fire.
    if crush_dump.get('types') is None:
        raise RuntimeError("'types' item missing in 'ceph osd crush dump'")

    return [t['name'] for t in crush_dump['types']]
++
def get_pool_metadata():
    """Return per-pool metadata parsed from 'ceph osd dump':
    a list of {'id', 'name', 'rep_size'} dicts.

    Raises RuntimeError when the command fails or no pool line matches.
    """
    (rc, o, e) = run_command(['ceph', 'osd', 'dump'])
    if rc != 0:  # BUG FIX: was 'rc is not 0'
        raise RuntimeError("'ceph osd dump' failed - " + e)

    # Older releases print "rep size"; newer ones print "replicated size".
    result = re.findall(r"pool\ (\d+)\ '(\S+)'\ rep\ size\ (\d+)", o)
    if not result:  # BUG FIX: was 'len(result) is 0'
        result = re.findall(r"pool\ (\d+)\ '(\S+)'\ replicated\ size\ (\d+)", o)
        if not result:
            raise RuntimeError("Unmatched pattern for 'pool' in 'ceph osd dump'")

    return [{'id': pid, 'name': name, 'rep_size': int(size)}
            for (pid, name, size) in result]
++
def get_sysinfo(max_osds):
    """Return per-OSD system information for OSD ids 0..max_osds-1.

    Each entry carries 'id' plus, when 'ceph osd metadata' succeeds for
    that OSD, 'ceph_version', 'os_info', 'hw_info' and 'nw_info'.
    """
    sysinfo = []
    osd_metadata_available = False

    for osd_id in range(max_osds):
        meta = {'id': osd_id}
        (rc, o, e) = run_command(['ceph', 'osd', 'metadata', str(osd_id)])
        if rc == 0:  # BUG FIX: was 'rc is 0'
            osd_metadata_available = True
            jmeta = json.loads(o)

            meta['ceph_version'] = jmeta['ceph_version']

            os_info = {'os': jmeta['os'],
                       'version': jmeta['kernel_version'],
                       'description': jmeta['kernel_description']}
            try:
                # Distro details are optional in 'ceph osd metadata'.
                os_info['distro'] = (jmeta['distro'] + ' ' +
                                     jmeta['distro_version'] + ' ' +
                                     jmeta['distro_codename'] + ' (' +
                                     jmeta['distro_description'] + ')')
            except KeyError:
                pass
            meta['os_info'] = os_info

            meta['hw_info'] = {'cpu': jmeta['cpu'],
                               'arch': jmeta['arch'],
                               'mem_kb': int(jmeta['mem_total_kb']),
                               'swap_kb': int(jmeta['mem_swap_kb'])}

            # BUG FIX: get_osd_host() may return None (e.g. IPv6 address);
            # the original crashed on tuple unpacking in that case.
            host = get_osd_host(osd_id)
            if host is not None:
                meta['nw_info'] = {'address': host[0], 'hostname': host[1]}

        sysinfo.append(meta)

    if not osd_metadata_available:
        sys.stderr.write("'ceph osd metadata' is not available at all\n")

    return sysinfo
++
def get_osd_host(osd_id):
    """Return (ip, hostname) for an OSD via 'ceph osd find', or None
    when the address is not a dotted-quad IPv4 string.

    Raises RuntimeError when 'ceph osd find' fails.
    """
    (rc, o, e) = run_command(['ceph', 'osd', 'find', str(osd_id)])
    if rc != 0:  # BUG FIX: was 'rc is not 0'
        raise RuntimeError("'ceph osd find' failed - " + e)

    jloc = json.loads(o)

    # BUG FIX: the dots were unescaped ('\d+.\d+...'), so '.' matched any
    # character; escape them to anchor a real dotted quad.
    mo = re.search(r"(\d+\.\d+\.\d+\.\d+).*", jloc['ip'])
    if mo is None:
        # Might be in ipv6 format, TODO: Verify
        return None

    return (mo.group(1), jloc['crush_location']['host'])
++
def get_ownership_info():
    """Return the stored ownership dict, or {} when it is not set.

    The value is stored as a python-literal dict string, so it is parsed
    with ast.literal_eval (safe, unlike eval).
    """
    (rc, o, e) = run_command(['ceph', 'config-key', 'get',
                              CLUSTER_OWNERSHIP_NAME])
    if rc != 0:  # BUG FIX: was 'rc is not 0'
        return {}

    return ast.literal_eval(o)
++
def output_json():
    """Assemble the full brag report.

    Returns a (json_string, url) pair; url is None when no publish URL
    has been configured via update-metadata.
    """
    report = {}
    report['uuid'] = get_uuid()
    report['cluster_creation_date'] = get_cluster_creation_date()
    counts = get_nums()
    report['components_count'] = counts
    report['crush_types'] = get_crush_types()
    report['pool_metadata'] = get_pool_metadata()
    report['sysinfo'] = get_sysinfo(int(counts['osds']))

    url = None
    owner = get_ownership_info()
    if owner is not None:
        report['ownership'] = owner
        # The URL is only used for publishing; strip it from the report.
        if 'url' in owner:
            url = owner.pop('url')

    return json.dumps(report, indent=2, separators=(',', ': ')), url
++
def describe_usage():
    """Print command-line usage to stderr."""
    # BUG FIX (portability): 'print >> sys.stderr' is Python-2-only
    # syntax; sys.stderr.write works on both Python 2 and 3.
    lines = [
        "Usage:",
        "======\n",
        sys.argv[0] + " <commands> [command-options]\n",
        "commands:",
        "publish - publish the brag report to the server",
        "update-metadata <update-metadata-options> - Update",
        " ownership information for bragging",
        "clear-metadata - Clear information set by update-metadata",
        "unpublish --yes-i-am-shy - delete the brag report from the server",
        "",
        "update-metadata options:",
        "--name= - Name of the cluster",
        "--organization= - Name of the organization",
        "--email= - Email contact address",
        "--description= - Reporting use-case",
        "--url= - The URL that is used to publish and unpublish",
        "",
    ]
    sys.stderr.write('\n'.join(lines) + '\n')
++
def update_metadata():
    """Merge --key=value command-line options into the stored ownership
    information.

    Returns the 'ceph config-key put' return code on success, or
    22 (EINVAL) on malformed or unknown options.
    """
    possibles = ['name', 'organization', 'email', 'description', 'url']

    # Start from the existing values so unspecified keys are preserved.
    info = get_ownership_info()

    for arg in sys.argv[2:]:
        mo = re.search(r"--(\S+)=(.*)", arg)
        if not mo:
            describe_usage()
            return 22

        key = mo.group(1)
        if key not in possibles:
            # BUG FIX: message typo 'Unexpect' -> 'Unexpected'; also use
            # sys.stderr.write instead of Python-2-only 'print >>'.
            sys.stderr.write("Unexpected option --" + key + "\n")
            describe_usage()
            return 22
        info[key] = mo.group(2)

    (rc, o, e) = run_command(['ceph', 'config-key', 'put',
                              CLUSTER_OWNERSHIP_NAME, str(info)])
    return rc
++
def clear_metadata():
    """Delete the stored ownership information from the cluster.

    Returns the 'ceph config-key del' return code.
    """
    ret, out, err = run_command(['ceph', 'config-key', 'del',
                                 CLUSTER_OWNERSHIP_NAME])
    return ret
++
def publish():
    """Upload the brag report (HTTP PUT) to the configured URL.

    Returns 0 on success, 1 when no URL is set or the server rejects
    the upload.
    """
    data, url = output_json()
    if url is None:
        sys.stderr.write("Cannot publish until a URL is set using update-metadata\n")
        return 1

    req = requests.put(url, data=data)
    # BUG FIX: was 'is not 201' -- identity comparison on an int only
    # works by accident of CPython's small-int caching.
    if req.status_code != 201:
        sys.stderr.write("Failed to publish, server responded with code " +
                         str(req.status_code) + "\n")
        return 1

    return 0
++
def unpublish():
    """Delete this cluster's brag report from the server (HTTP DELETE).

    Requires the '--yes-i-am-shy' confirmation flag. Returns 0 on
    success, 22 on bad usage, 1 when no URL is set or the server fails.
    """
    if len(sys.argv) <= 2 or sys.argv[2] != '--yes-i-am-shy':
        sys.stderr.write("unpublish should be followed by --yes-i-am-shy\n")
        return 22

    owner = get_ownership_info()
    url = owner.get('url') if owner else None
    if url is None:
        sys.stderr.write("URL is not updated yet\n")
        return 1

    # BUG FIX: renamed local from 'uuid' -- it shadowed the uuid module.
    cluster_uuid = get_uuid()

    req = requests.delete(url, params={'uuid': cluster_uuid})
    # BUG FIX: was 'is not 200'; also message typo 'responsed'.
    if req.status_code != 200:
        sys.stderr.write("Failed to unpublish, server responded with code " +
                         str(req.status_code) + "\n")
        return 1

    return 0
++
def main():
    """Entry point: with no arguments print the report, otherwise
    dispatch to the named sub-command. Returns a process exit code."""
    # BUG FIX: was 'len(sys.argv) is 1' (identity comparison) and a
    # Python-2-only bare 'print' statement.
    if len(sys.argv) == 1:
        sys.stdout.write(output_json()[0] + '\n')
        return 0

    handlers = {'update-metadata': update_metadata,
                'clear-metadata': clear_metadata,
                'publish': publish,
                'unpublish': unpublish}
    handler = handlers.get(sys.argv[1])
    if handler is None:
        describe_usage()
        return 22
    return handler()
++
++if __name__ == '__main__':
++ sys.exit(main())
--- /dev/null
--- /dev/null
++recursive-include public *
--- /dev/null
--- /dev/null
import os
from pecan.deploy import deploy

# WSGI entry point: deploy the pecan application using the config.py
# that sits next to this file.
# BUG FIX: use os.path.join instead of string concatenation --
# os.path.dirname(__file__) is '' when the file is referenced by a bare
# name, and '' + '/config.py' would wrongly resolve to /config.py.
application = deploy(os.path.join(os.path.dirname(__file__), 'config.py'))
--- /dev/null
--- /dev/null
++Metadata-Version: 1.0
++Name: ceph-brag
++Version: 0.1
++Summary: UNKNOWN
++Home-page: UNKNOWN
++Author: UNKNOWN
++Author-email: UNKNOWN
++License: UNKNOWN
++Description: UNKNOWN
++Platform: UNKNOWN
--- /dev/null
--- /dev/null
++MANIFEST.in
++config.py
++setup.cfg
++setup.py
++ceph_brag/__init__.py
++ceph_brag/app.py
++ceph_brag/json.py
++ceph_brag.egg-info/PKG-INFO
++ceph_brag.egg-info/SOURCES.txt
++ceph_brag.egg-info/dependency_links.txt
++ceph_brag.egg-info/not-zip-safe
++ceph_brag.egg-info/requires.txt
++ceph_brag.egg-info/top_level.txt
++ceph_brag/controllers/__init__.py
++ceph_brag/controllers/root.py
++ceph_brag/model/__init__.py
++ceph_brag/model/db.py
++ceph_brag/tests/__init__.py
++ceph_brag/tests/config.py
++ceph_brag/tests/test_functional.py
++ceph_brag/tests/test_units.py
--- /dev/null
--- /dev/null
++
--- /dev/null
--- /dev/null
++
--- /dev/null
--- /dev/null
++pecan
--- /dev/null
--- /dev/null
++ceph_brag
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++from pecan import make_app
++from ceph_brag import model, json
++from pecan.hooks import TransactionHook
++
def setup_app(config):
    """Pecan application factory.

    Initializes the database model and wires the per-request
    transaction hook before building the WSGI app.
    """
    model.init_model()

    app_conf = dict(config.app)
    root_controller = app_conf.pop('root')
    txn_hook = TransactionHook(model.start,
                               model.start,
                               model.commit,
                               model.rollback,
                               model.clear)

    return make_app(root_controller,
                    logging=getattr(config, 'logging', {}),
                    hooks=[txn_hook],
                    **app_conf)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++from pecan import expose, request, abort, response
++from webob import exc
++from pecan.rest import RestController
++from ceph_brag.model import db
++import sys, traceback
++
class RootController(RestController):
    """REST entry point.

    GET /               -> list of registered clusters
    GET /uuid           -> list of versions for a cluster
    GET /uuid/version   -> full brag report
    PUT /               -> store a new report version
    DELETE /?uuid=xxxx  -> delete all versions of a cluster
    """

    def fail(self, status_code=200, msg="OK"):
        """Set the HTTP response status and return *msg* as the body."""
        response.status = status_code
        return msg

    @expose('json')
    def get(self, *args, **kwargs):
        """Dispatch GET on the number of path components; a trailing
        slash (empty last component) is tolerated."""
        # BUG FIX: all 'len(args) is N' identity comparisons replaced
        # with '==' -- 'is' on ints only works by CPython accident.
        num_args = len(args)
        if num_args == 0:
            # Return the list of registered cluster uuids.
            try:
                result = db.get_uuids()
            except Exception as e:
                return self.fail(500, msg="Internal Server Error")
        elif num_args == 1 or (num_args == 2 and args[1] == ''):
            # /uuid
            try:
                result = db.get_versions(args[0])
            except Exception as e:
                return self.fail(status_code=500, msg="Internal Server Error")

            if result is None:
                return self.fail(400, msg="Invalid UUID")
        elif num_args == 2 or (num_args == 3 and args[2] == ''):
            # /uuid/version_number
            try:
                result = db.get_brag(args[0], args[1])
            except Exception as e:
                return self.fail(status_code=500, msg="Internal Server Error")

            if result is None:
                return self.fail(status_code=400, msg="Invalid UUID,version combination")
        else:
            return self.fail(status_code=400, msg="Invalid args")

        return result

    @expose(content_type='application/json')
    def put(self, *args, **kwargs):
        """Store the request body as a new brag report version."""
        try:
            db.put_new_version(request.body)
        except ValueError as ve:
            # Body was not valid JSON.
            return self.fail(status_code=422, msg="Improper payload")
        except KeyError as ke:
            msg = "Payload not as expected, some keys are missing"
            return self.fail(status_code=422, msg=msg)
        except Exception as e:
            return self.fail(status_code=500, msg="Internal Server Error")

        response.status = 201
        return "CREATED"

    @expose()
    def delete(self, *args, **kwargs):
        """Delete every stored version of the cluster named by the
        'uuid' query parameter."""
        if 'uuid' not in kwargs:
            return self.fail(status_code=400, msg="Required uuid parameter")

        try:
            status = db.delete_uuid(kwargs['uuid'])
        except Exception as e:
            return self.fail(status_code=500, msg="Internal Server Error")

        # delete_uuid returns None on success, or an error descriptor.
        if status is not None:
            return self.fail(status_code=status['status'], msg=status['msg'])

        response.status = 200
        return "DELETED"
--- /dev/null
--- /dev/null
++from pecan.jsonify import jsonify
++from ceph_brag.model import db
++
@jsonify.register(db.version_info)
def jsonify_version(vi):
    """JSON encoder for a version_info row."""
    return {
        'version_id': vi.index,
        'version_number': vi.version_number,
        'version_date': str(vi.version_date),
    }
++
@jsonify.register(db.cluster_info)
def jsonify_cluster_info(ci):
    """JSON encoder for a cluster_info row (cluster list entries)."""
    return {
        'uuid': ci.uuid,
        'organization': ci.organization,
        'email': ci.contact_email,
        'cluster_name': ci.cluster_name,
        'cluster_creation_date': str(ci.cluster_creation_date),
        'num_versions': ci.num_versions,
    }
++
@jsonify.register(db.components_info)
def jsonify_components_info(comps):
    """JSON encoder for a components_info row (component counts)."""
    byte_info = {'count': comps.byte_count, 'scale': comps.byte_scale}
    return {
        'bytes': byte_info,
        'osds': comps.num_osds,
        'objects': comps.num_objects,
        'pgs': comps.num_pgs,
        'pools': comps.num_pools,
        'mdss': comps.num_mdss,
        'mons': comps.num_mons,
    }
++
@jsonify.register(db.pools_info)
def jsonify_pools_info(pool):
    """JSON encoder for a pools_info row."""
    return {
        'rep_size': pool.pool_rep_size,
        'name': pool.pool_name,
        'id': pool.pool_id,
    }
++
@jsonify.register(db.osds_info)
def jsonify_osds_info(osd):
    """JSON encoder for an osds_info row (per-OSD system info)."""
    network = {'address': osd.nw_address, 'hostname': osd.hostname}
    hardware = {'swap_kb': osd.swap_kb, 'mem_kb': osd.mem_kb,
                'arch': osd.arch, 'cpu': osd.cpu}
    operating_system = {'os': osd.os, 'version': osd.os_version,
                        'description': osd.os_desc, 'distro': osd.distro}
    return {
        'nw_info': network,
        'hw_info': hardware,
        'id': osd.osd_id,
        'os_info': operating_system,
        'ceph_version': osd.ceph_version,
    }
++
@jsonify.register(db.brag)
def jsonify_brag(b):
    """JSON encoder for a full brag report (cluster + one version)."""
    cluster = b.ci
    ownership = {'organization': cluster.organization,
                 'description': cluster.description,
                 'email': cluster.contact_email,
                 'name': cluster.cluster_name}
    return {
        'uuid': cluster.uuid,
        'cluster_creation_date': str(cluster.cluster_creation_date),
        'components_count': b.comps,
        # crush types are stored as a comma-separated string.
        'crush_types': b.comps.crush_types.split(','),
        'ownership': ownership,
        'pool_metadata': b.pools,
        'sysinfo': b.osds,
    }
--- /dev/null
--- /dev/null
++from sqlalchemy import create_engine
++from pecan import conf # noqa
++from db import Session, Base
++import sys
++
def create_from_conf():
    """Build the SQLAlchemy engine from pecan's 'sqlalchemy' config.

    The 'url' key names the database; every remaining key is passed
    through to create_engine as a keyword argument.
    """
    settings = dict(conf.sqlalchemy)
    db_url = settings.pop('url')
    return create_engine(db_url, **settings)
++
def init_model():
    """Create the engine, verify connectivity, and create any missing
    tables. Called once from setup_app()."""
    engine = create_from_conf()
    # Stash the engine on the config so start() can bind the session.
    conf.sqlalchemy.engine = engine
    engine.connect()
    #create the tables if not existing
    Base.metadata.create_all(engine)
++
def start():
    # Bind the scoped session to the engine created in init_model();
    # used as the pecan TransactionHook start callback.
    Session.bind = conf.sqlalchemy.engine
++
def commit():
    # Commit the current transaction (TransactionHook commit callback).
    Session.commit()
++
def rollback():
    # Roll back the current transaction (TransactionHook error callback).
    Session.rollback()
++
def clear():
    # Dispose of the scoped session at the end of the request.
    Session.remove()
++
--- /dev/null
--- /dev/null
++import json
++from datetime import datetime
++from sqlalchemy.orm import sessionmaker, scoped_session
++from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
++from sqlalchemy import PrimaryKeyConstraint
++from sqlalchemy.ext.declarative import declarative_base
++from sqlalchemy.ext.declarative import declared_attr
++
++Base = declarative_base()
++Session = scoped_session(sessionmaker())
++
class cluster_info(Base):
    """One registered cluster -- a single row per cluster UUID."""
    __tablename__ = 'cluster_info'

    index = Column(Integer, primary_key=True)
    uuid = Column(String(36), unique=True)
    organization = Column(String)
    contact_email = Column(String)
    cluster_name = Column(String)
    cluster_creation_date = Column(DateTime)
    description = Column(String)
    # Number of report versions uploaded so far for this cluster.
    num_versions = Column(Integer)
++
class version_info(Base):
    """One uploaded report version, linked to its cluster_info row."""
    __tablename__ = 'version_info'

    index = Column(Integer, primary_key=True)
    cluster_id = Column(ForeignKey('cluster_info.index'))
    # Per-cluster sequence number (1-based), not the global row index.
    version_number = Column(Integer)
    version_date = Column(DateTime)
++
class components_info(Base):
    """Aggregate component counts belonging to one report version."""
    __tablename__ = 'components_info'

    index = Column(Integer, primary_key=True)
    vid = Column(ForeignKey('version_info.index'))
    byte_count = Column(Integer)
    byte_scale = Column(String(8))
    num_osds = Column(Integer)
    num_objects = Column(Integer)
    num_pgs = Column(Integer)
    num_pools = Column(Integer)
    num_mdss = Column(Integer)
    num_mons = Column(Integer)
    # Comma-separated list of CRUSH bucket type names.
    crush_types = Column(String)
++
class pools_info(Base):
    """Per-pool metadata belonging to one report version."""
    __tablename__ = 'pools_info'

    index = Column(Integer, primary_key=True)
    vid = Column(ForeignKey('version_info.index'))
    pool_id = Column(String)
    pool_name = Column(String)
    pool_rep_size = Column(Integer)
++
class osds_info(Base):
    """Per-OSD system information belonging to one report version."""
    __tablename__ = 'osds_info'

    index = Column(Integer, primary_key=True)
    vid = Column(ForeignKey('version_info.index'))
    osd_id = Column(String)
    nw_address = Column(String(16))
    hostname = Column(String)
    swap_kb = Column(Integer)
    mem_kb = Column(Integer)
    arch = Column(String)
    cpu = Column(String)
    os = Column(String)
    os_version = Column(String)
    os_desc = Column(String)
    distro = Column(String)
    ceph_version = Column(String)
++
class brag(object):
    """Aggregates all DB rows that make up one brag report: the cluster
    row (ci), the version row (vi), component counts (comps), pool rows
    (pools) and OSD rows (osds).

    Attributes are None / unset when the uuid or version is unknown;
    callers must check ci and vi before using the rest.
    """

    def __init__(self, uuid, version_number):
        # BUG FIX: initialize every attribute up front -- previously
        # 'vi', 'comps', 'pools' and 'osds' were never set when the
        # lookup failed, so any access raised AttributeError.
        self.ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
        self.vi = None
        self.comps = None
        self.pools = None
        self.osds = None

        if self.ci is not None:
            self.vi = Session.query(version_info).filter_by(
                cluster_id=self.ci.index,
                version_number=version_number).first()

        if self.ci is not None and self.vi is not None:
            self.comps = Session.query(components_info).filter_by(vid=self.vi.index).first()
            self.pools = Session.query(pools_info).filter_by(vid=self.vi.index).all()
            self.osds = Session.query(osds_info).filter_by(vid=self.vi.index).all()
++
def put_new_version(data):
    """Parse a JSON brag report and store it as a new version.

    Raises ValueError for malformed JSON and KeyError when expected keys
    are missing; the controller maps both to HTTP 422.
    """
    info = json.loads(data)

    def add_cluster_info():
        # Fetch-or-create the cluster row; bumps num_versions when the
        # cluster already exists.
        ci = Session.query(cluster_info).filter_by(uuid=info['uuid']).first()
        if ci is None:
            dt = datetime.strptime(info['cluster_creation_date'], "%Y-%m-%d %H:%M:%S.%f")
            ci = cluster_info(uuid=info['uuid'],
                              organization=info['ownership']['organization'],
                              contact_email=info['ownership']['email'],
                              cluster_name=info['ownership']['name'],
                              description=info['ownership']['description'],
                              cluster_creation_date=dt,
                              num_versions=1)
            Session.add(ci)
            # Commit so ci.index is assigned for the FK references below.
            Session.commit()
        else:
            ci.num_versions += 1

        return ci

    def add_version_info(ci):
        # Create the version row for this upload.
        vi = version_info(cluster_id=ci.index,
                          version_number=ci.num_versions,
                          version_date=datetime.now())
        Session.add(vi)
        return vi

    def add_components_info(vi):
        # Store component counts; crush type names are flattened to a
        # comma-separated string.
        comps_count = info['components_count']
        comps_info = components_info(vid=vi.index,
                                     byte_count=comps_count['bytes']['count'],
                                     byte_scale=comps_count['bytes']['scale'],
                                     num_osds=comps_count['osds'],
                                     num_objects=comps_count['objects'],
                                     num_pgs=comps_count['pgs'],
                                     num_pools=comps_count['pools'],
                                     num_mdss=comps_count['mdss'],
                                     num_mons=comps_count['mons'],
                                     crush_types=','.join(info['crush_types']))
        Session.add(comps_info)

    def add_pools_info(vi):
        # One pools_info row per pool in the report.
        pools = info['pool_metadata']
        for p in pools:
            Session.add(pools_info(vid=vi.index,
                                   pool_id=p['id'],
                                   pool_name=p['name'],
                                   pool_rep_size=p['rep_size']))

    def add_osds_info(vi):
        # One osds_info row per OSD in the report.
        osds = info['sysinfo']
        for o in osds:
            osd = osds_info(vid=vi.index,
                            osd_id=o['id'],
                            nw_address=o['nw_info']['address'],
                            hostname=o['nw_info']['hostname'],
                            swap_kb=o['hw_info']['swap_kb'],
                            mem_kb=o['hw_info']['mem_kb'],
                            arch=o['hw_info']['arch'],
                            cpu=o['hw_info']['cpu'],
                            os=o['os_info']['os'],
                            os_version=o['os_info']['version'],
                            os_desc=o['os_info']['description'],
                            distro=o['os_info']['distro'],
                            ceph_version=o['ceph_version'])
            Session.add(osd)

    ci = add_cluster_info()
    add_version_info(ci)
    # Re-query the freshly added version row, presumably so vi.index is
    # populated before it is used as a foreign key -- NOTE(review):
    # confirm this relies on the session autoflush on query.
    vi = Session.query(version_info).filter_by(cluster_id=ci.index,
                                               version_number=ci.num_versions).first()
    add_components_info(vi)
    add_pools_info(vi)
    add_osds_info(vi)
++
def delete_uuid(uuid):
    """Delete a cluster and all of its versions and child rows.

    Returns None on success, or a {'status', 'msg'} dict describing the
    HTTP error the controller should report.
    """
    ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
    if ci is None:
        return {'status':400, 'msg':'No information for this UUID'}

    # Delete child rows version by version before the parent rows.
    for v in Session.query(version_info).filter_by(cluster_id=ci.index).all():
        Session.query(components_info).filter_by(vid=v.index).delete()
        Session.query(pools_info).filter_by(vid=v.index).delete()
        Session.query(osds_info).filter_by(vid=v.index).delete()
        Session.delete(v)

    Session.delete(ci)
    return None
++
def get_uuids():
    """Return every registered cluster_info row."""
    all_clusters = Session.query(cluster_info)
    return all_clusters.all()
++
def get_versions(uuid):
    """Return all version_info rows for *uuid*, or None when the uuid
    is unknown."""
    cluster = Session.query(cluster_info).filter_by(uuid=uuid).first()
    if cluster is None:
        return None

    versions = Session.query(version_info).filter_by(cluster_id=cluster.index)
    return versions.all()
++
def get_brag(uuid, version_id):
    """Return a populated brag object for (uuid, version), or None when
    either part of the combination is unknown."""
    report = brag(uuid, version_id)
    if report.ci is not None and report.vi is not None:
        return report
    return None
--- /dev/null
--- /dev/null
++import os
++from unittest import TestCase
++from pecan import set_config
++from pecan.testing import load_test_app
++
++__all__ = ['FunctionalTest']
++
++
class FunctionalTest(TestCase):
    """
    Used for functional tests where you need to test your
    literal application and its integration with the framework.
    """

    def setUp(self):
        # Load the pecan test app from the config.py that sits next to
        # this package (ceph_brag/tests/config.py).
        self.app = load_test_app(os.path.join(
            os.path.dirname(__file__),
            'config.py'
        ))

    def tearDown(self):
        # Reset pecan's global configuration between tests.
        set_config({}, overwrite=True)
--- /dev/null
--- /dev/null
++# Server Specific Configurations
++server = {
++ 'port': '8080',
++ 'host': '0.0.0.0'
++}
++
++# Pecan Application Configurations
++app = {
++ 'root': 'ceph_brag.controllers.root.RootController',
++ 'modules': ['ceph_brag'],
++ 'static_root': '%(confdir)s/public',
++ 'template_path': '%(confdir)s/ceph_brag/templates',
++ 'debug': True,
++ 'errors': {
++ 404: '/error/404',
++ '__force_dict__': True
++ }
++}
++
++logging = {
++ 'loggers': {
++ 'root': {'level': 'INFO', 'handlers': ['console']},
++ 'ceph_brag': {'level': 'DEBUG', 'handlers': ['console']},
++ 'py.warnings': {'handlers': ['console']},
++ '__force_dict__': True
++ },
++ 'handlers': {
++ 'console': {
++ 'level': 'DEBUG',
++ 'class': 'logging.StreamHandler',
++ 'formatter': 'simple'
++ }
++ },
++ 'formatters': {
++ 'simple': {
++ 'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
++ '[%(threadName)s] %(message)s')
++ }
++ }
++}
++
++sqlalchemy = {
++ 'url' : 'sqlite:////tmp/test.db',
++ 'echo' : False,
++ 'encoding' : 'utf-8'
++}
++
++
++# Custom Configurations must be in Python dictionary format::
++#
++# foo = {'bar':'baz'}
++#
++# All configurations are accessible at::
++# pecan.conf
--- /dev/null
--- /dev/null
++from unittest import TestCase
++from webtest import TestApp
++from ceph_brag.tests import FunctionalTest
++import json, sys
++from pecan import request
++
class TestRootController(FunctionalTest):
    """End-to-end tests against the REST controller.

    NOTE(review): the tests are order-dependent (named test_1..test_92
    so nose runs the PUT before the GETs and the DELETE last), and
    test_2_put expects a 'sample.json' file in the working directory --
    confirm the test runner's cwd.
    """

    def test_1_get_invalid_url_format(self):
        # Three path components with a non-empty tail is invalid.
        response = self.app.get('/1/2/3', expect_errors=True)
        assert response.status_int == 400

    def test_2_put(self):
        # Upload the sample report; the server should answer 201 CREATED.
        with open ("sample.json", "r") as myfile:
            data=myfile.read().replace('\n', '')
        response = self.app.request('/', method='PUT', body=data)
        assert response.status_int == 201

    def test_3_put_invalid_json(self):
        # Malformed JSON body -> 422.
        response = self.app.request('/', method='PUT', body='{asdfg', expect_errors=True)
        assert response.status_int == 422

    def test_4_put_invalid_entries_1(self):
        # Valid JSON but empty payload (missing keys) -> 422.
        response = self.app.request('/', method='PUT', body='{}', expect_errors=True)
        assert response.status_int == 422

    def test_5_put_incomplete_json(self):
        # Payload with only a uuid (other keys missing) -> 422.
        response = self.app.request('/', method='PUT', body='{\"uuid\":\"adfs-12312ad\"}',
                                    expect_errors=True)
        assert response.status_int == 422

    def test_6_get(self):
        # Fetch the cluster list, then the latest report of the first entry.
        response = self.app.get('/')
        js = json.loads(response.body)
        for entry in js:
            ci = entry
            break

        response = self.app.get('/'+ci['uuid']+'/'+str(ci['num_versions']))
        assert response.status_int == 200

    def test_7_get_invalid_uuid(self):
        response = self.app.get('/xxxxxx', expect_errors=True)
        assert response.status_int == 400

    def test_8_get_invalid_version(self):
        # Version numbers are 1-based, so version 0 is always invalid.
        response = self.app.get('/')
        js = json.loads(response.body)
        for entry in js:
            ci = entry
            break

        response = self.app.get('/'+ci['uuid']+'/'+str(0), expect_errors=True)
        assert response.status_int == 400

    def test_9_delete_invalid_parameters(self):
        # DELETE without the uuid query parameter -> 400.
        response = self.app.delete('/', expect_errors=True)
        assert response.status_int == 400

    def test_91_delete_wrong_uuid(self):
        response = self.app.delete('/?uuid=xxxx', expect_errors=True)
        assert response.status_int == 400

    def test_92_delete(self):
        # Delete every registered cluster (cleans up after test_2_put).
        response = self.app.get('/')
        js = json.loads(response.body)
        for entry in js:
            response = self.app.delete('/?uuid='+entry['uuid'])
            assert response.status_int == 200
--- /dev/null
--- /dev/null
++from unittest import TestCase
++
++
class TestUnits(TestCase):
    """Placeholder unit-test suite."""

    def test_units(self):
        # Trivial sanity check so the unit-test stage always has a test.
        assert 5 * 5 == 25
--- /dev/null
--- /dev/null
++# Server Specific Configurations
++server = {
++ 'port': '8080',
++ 'host': '0.0.0.0'
++}
++
++# Pecan Application Configurations
++app = {
++ 'root': 'ceph_brag.controllers.root.RootController',
++ 'modules': ['ceph_brag'],
++ 'debug': True,
++ 'errors': {
++ 404: '/error/404',
++ '__force_dict__': True
++ }
++}
++
++logging = {
++ 'loggers': {
++ 'root': {'level': 'INFO', 'handlers': ['console']},
++ 'ceph_brag': {'level': 'DEBUG', 'handlers': ['console']},
++ 'py.warnings': {'handlers': ['console']},
++ '__force_dict__': True
++ },
++ 'handlers': {
++ 'console': {
++ 'level': 'DEBUG',
++ 'class': 'logging.StreamHandler',
++ 'formatter': 'simple'
++ }
++ },
++ 'formatters': {
++ 'simple': {
++ 'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
++ '[%(threadName)s] %(message)s')
++ }
++ }
++}
++
++sqlalchemy = {
++ 'url' : 'sqlite:////tmp/test.db',
++ 'echo' : False,
++ 'encoding' : 'utf-8'
++}
++
++
++# Custom Configurations must be in Python dictionary format::
++#
++# foo = {'bar':'baz'}
++#
++# All configurations are accessible at::
++# pecan.conf
--- /dev/null
--- /dev/null
++{
++ "cluster_creation_date": "2014-01-16 13:38:41.928551",
++ "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
++ "components_count": {
++ "bytes": {
++ "count": 0,
++ "scale": "bytes"
++ },
++ "osds": 1,
++ "objects": 0,
++ "pgs": 192,
++ "pools": 3,
++ "mdss": 1,
++ "mons": 1
++ },
++ "crush_types": [
++ "osd",
++ "host",
++ "chassis",
++ "rack",
++ "row",
++ "pdu",
++ "pod",
++ "room",
++ "datacenter",
++ "region",
++ "root"
++ ],
++ "ownership": {
++ "organization": "eNovance",
++ "description": "Use case1",
++ "email": "mail@enovance.com",
++ "name": "Cluster1"
++ },
++ "pool_metadata": [
++ {
++ "rep_size": 3,
++ "id": "0",
++ "name": "data"
++ },
++ {
++ "rep_size": 3,
++ "id": "1",
++ "name": "metadata"
++ },
++ {
++ "rep_size": 3,
++ "id": "2",
++ "name": "rbd"
++ }
++ ],
++ "sysinfo": [
++ {
++ "nw_info": {
++ "hostname": "ceph-brag",
++ "address": "127.0.0.1"
++ },
++ "hw_info": {
++ "swap_kb": 0,
++ "arch": "x86_64",
++ "cpu": "Intel Xeon E312xx (Sandy Bridge)",
++ "mem_kb": 2051648
++ },
++ "id": 0,
++ "os_info": {
++ "version": "3.2.0-23-virtual",
++ "os": "Linux",
++ "description": "#36-Ubuntu SMP Tue Apr 10 22:29:03 UTC 2012",
++ "distro": "Ubuntu 12.04 precise (Ubuntu 12.04 LTS)"
++ },
++ "ceph_version": "ceph version 0.75-229-g4050eae (4050eae32cd77a1c210ca11d0f12c74daecb1bd3)"
++ }
++ ]
++ }
--- /dev/null
--- /dev/null
++[nosetests]
++match=^test
++where=ceph_brag
++nocapture=1
++cover-package=ceph_brag
++cover-erase=1
--- /dev/null
--- /dev/null
# -*- coding: utf-8 -*-
# Packaging script for the ceph-brag server application.
try:
    from setuptools import setup, find_packages
except ImportError:
    # Bootstrap setuptools when it is not installed.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

setup(
    name='ceph_brag',
    version='0.1',
    description='',
    author='',
    author_email='',
    install_requires=[
        "pecan",
    ],
    test_suite='ceph_brag',
    zip_safe=False,
    include_package_data=True,
    packages=find_packages(exclude=['ez_setup'])
)