Merge branch 'master' of https://github.com/enovance/ceph-brag into firefly (1409/head)
author     Babu Shanmugam <anbu@enovance.com>
           Mon, 10 Mar 2014 06:12:58 +0000 (06:12 +0000)
committer  Babu Shanmugam <anbu@enovance.com>
           Mon, 10 Mar 2014 06:12:58 +0000 (06:12 +0000)
src/brag/README.md
src/brag/client/ceph-brag
src/brag/server/ceph_brag/json.py
src/brag/server/ceph_brag/model/db.py
src/brag/server/sample.json

diff --combined src/brag/README.md
index 1cbc11b4a7d2d92e5cdc04c1346f0c0a76270035,0000000000000000000000000000000000000000..55af44f83a6a394de2cd396306ca2192b890181c
mode 100644,000000..100644
--- /dev/null
@@@ -1,160 -1,0 +1,184 @@@
-         "bytes": {
-           "count": 0,
-           "scale": "bytes"
-         },
-         "osds": 1,
-         "objects": 0,
-         "pgs": 192,
-         "pools": 3,
-         "mdss": 1,
-         "mons": 1
 +# Ceph-brag
 +
 +`ceph-brag` is an anonymized cluster reporting tool designed to collect a "registry" of Ceph clusters for community knowledge.
 +The data is displayed on a public web page, identified only by UUID by default; users can claim their cluster and publish ownership information if they so desire.
 +
 +For more information please visit:
 +
 +* [Blueprint](http://wiki.ceph.com/Planning/Blueprints/Firefly/Ceph-Brag)
 +* [CDS Etherpad](http://pad.ceph.com/p/cdsfirefly-ceph-brag)
 +
 +# Client
 +
 +## How to use:
 +
 +### Pre-requisites:
 +ceph-brag uses the 'ceph' Python script. Before running ceph-brag, make sure the Ceph services are all running and that 'ceph' is in the 'PATH' environment variable.
 +
 +### Runtime instructions:
 +Run 'ceph-brag -h' to get the usage information for this tool.
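 +
 +For quick reference, these are the commands described in that usage text (a bare invocation simply prints the report):
 +
 +    ceph-brag                            # print the anonymized brag report as JSON
 +    ceph-brag update-metadata [options]  # update ownership information for bragging
 +    ceph-brag clear-metadata             # clear information set by update-metadata
 +    ceph-brag publish                    # publish the brag report to the server
 +    ceph-brag unpublish --yes-i-am-shy   # delete the brag report from the server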
 +
 +### Sample output:
 +
 +    {
 +      "cluster_creation_date": "2014-01-16 13:38:41.928551",
 +      "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
 +      "components_count": {
-         "osd",
-         "host",
-         "chassis",
-         "rack",
-         "row",
-         "pdu",
-         "pod",
-         "room",
-         "datacenter",
-         "region",
-         "root"
++        "num_bytes": 0,
++        "num_osds": 1,
++        "num_objects": 0,
++        "num_pgs": 192,
++        "num_pools": 3,
++        "num_mdss": 1,
++        "num_mons": 1
 +      },
 +      "crush_types": [
-           "rep_size": 3,
-           "id": "0",
-           "name": "data"
++        {
++          "type": "osd",
++          "count": 2
++        },
++        {
++          "type": "rack",
++          "count": 1
++        },
++        {
++          "type": "host",
++          "count": 1
++        },
++        {
++          "type": "root",
++          "count": 1
++        }
 +      ],
 +      "ownership": {
 +        "organization": "eNovance",
 +        "description": "Use case1",
 +        "email": "mail@enovance.com",
 +        "name": "Cluster1"
 +      },
 +      "pool_metadata": [
 +        {
-           "rep_size": 3,
-           "id": "1",
-           "name": "metadata"
++          "size": 3,
++          "id": 0,
++          "type": 1
 +        },
 +        {
-           "rep_size": 3,
-           "id": "2",
-           "name": "rbd"
++          "size": 3,
++          "id": 1,
++          "type": 1
 +        },
 +        {
-       "sysinfo": [
-         {
-           "nw_info": {
-             "hostname": "ceph-brag",
-             "address": "127.0.0.1"
-           },
-           "hw_info": {
-             "swap_kb": 0,
-             "arch": "x86_64",
-             "cpu": "Intel Xeon E312xx (Sandy Bridge)",
-             "mem_kb": 2051648
-           },
-           "id": 0,
-           "os_info": {
-             "version": "3.2.0-23-virtual",
-             "os": "Linux",
-             "description": "#36-Ubuntu SMP Tue Apr 10 22:29:03 UTC 2012",
++          "size": 3,
++          "id": 2,
++          "name": 1
 +        }
 +      ],
-           },
-           "ceph_version": "ceph version 0.75-229-g4050eae (4050eae32cd77a1c210ca11d0f12c74daecb1bd3)"
-         }
-       ]
++      "sysinfo": {
++        "kernel_types": [
++          {
++            "count": 1,
++            "type": "#36-Ubuntu SMP Tue Apr 10 22:29:03 UTC 2012"
++          }
++        ],
++        "cpu_archs": [
++          {
++            "count": 1,
++            "arch": "x86_64"
++          }
++        ],
++        "cpus": [
++          {
++            "count": 1,
++            "cpu": "Intel Xeon E312xx (Sandy Bridge)"
++          }
++        ],
++        "kernel_versions": [
++          {
++            "count": 1,
++            "version": "3.2.0-23-virtual"
++          }
++        ],
++        "ceph_versions": [
++          {
++            "count": 1,
++            "version": "0.75-229-g4050eae(4050eae32cd77a1c210ca11d0f12c74daecb1bd3)"
++          }
++        ],
++        "os_info": [
++          {
++            "count": 1,
++            "os": "Linux"
++          }
++        ],
++        "distros": [
++          {
++            "count": 1,
 +            "distro": "Ubuntu 12.04 precise (Ubuntu 12.04 LTS)"
++          }
++        ]
++      }
 +    }
 +
 +
 +# Server
 +
 +## Info
 +The ceph-brag server is a Python-based web application.
 +
 +## How to use
 +
 +### Prerequisites
 +* [pecan](http://pecanpy.org) is the web framework used by this application.
 +* [sqlalchemy](http://www.sqlalchemy.org) is the ORM used by this application.
 +
 +### How to deploy
 +* [Common recipes to deploy](http://pecan.readthedocs.org/en/latest/deployment.html#common-recipes)
 +* Modify sqlalchemy['url'] in server/config.py to point to the correct database connection (see the sketch below).
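 +
 +A minimal sketch of the relevant part of server/config.py (the SQLite URL is only an example; any SQLAlchemy connection URL for your database works):
 +
 +    # server/config.py (excerpt)
 +    sqlalchemy = {
 +        'url': 'sqlite:///ceph_brag.db',   # example URL; point this at the real database
 +    }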
 +
 +## URLs
 +The following REST URLs are implemented, with 'url-prefix' being the mount point of the WSGI script.
 +
 +### GET
 +
 +##### * GET /url-prefix/
 +Returns the list of clusters registered so far.
 +Output - on success, application/json in the following format is returned:
 +
 +    [
 +      {
 +       "num_versions": 3, 
 +       "cluster_creation_date": "2014-01-16 13:38:41.928551", 
 +       "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9", 
 +       "cluster_name": "Cluster1", 
 +       "organization": "eNovance", 
 +       "email": "mail@enovance.com"
 +      },
 +      ...
 +    ]
 +
 +##### * GET /url-prefix/UUID
 +Returns the list of version information for a particular UUID.
 +Output - on success, application/json in the following format is returned:
 +
 +    [
 +      {
 +        "version_number": 1, 
 +        "version_date": "2014-02-10 10:17:56.283499"
 +      },
 +      ...
 +    ]
 +
 +##### * GET /url-prefix/UUID/version\_number
 +Returns the entire brag report, in the format shown in the client's sample output, for a particular version of a UUID.
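 +
 +For illustration, the GET endpoints can be exercised with a few lines of Python using requests (the base URL below is a placeholder for wherever the WSGI script is mounted):
 +
 +    import requests
 +
 +    base = 'http://ceph-brag.example.com/url-prefix'    # hypothetical mount point
 +    clusters = requests.get(base + '/').json()          # list of registered clusters
 +    uuid = clusters[0]['uuid']
 +    versions = requests.get(base + '/' + uuid).json()   # version list for that UUID
 +    brag = requests.get('%s/%s/%d' % (base, uuid, versions[0]['version_number'])).json()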
 +
 +### PUT
 +
 +##### * PUT /url-prefix
 +Uploads the brag report and creates a new version for the UUID mentioned in the payload
 +
 +### DELETE
 +
 +##### * DELETE /url-prefix?uuid=xxxx
 +Deletes all the versions of a cluster whose UUID is sent as a parameter
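 +
 +Publishing and deleting a report map onto a PUT and a DELETE; the sketch below mirrors what the client's publish and unpublish commands do (the URL and 'brag.json' filename are placeholders):
 +
 +    import json, requests
 +
 +    url = 'http://ceph-brag.example.com/url-prefix'            # hypothetical mount point
 +    report = json.load(open('brag.json'))                      # a brag report saved from 'ceph-brag'
 +    r = requests.put(url, data=json.dumps(report))             # upload; the server replies 201 on success
 +    r = requests.delete(url, params={'uuid': report['uuid']})  # delete all versions; 200 on success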
 +
 +
diff --combined src/brag/client/ceph-brag
index 838189942c58ac417001f84cdea3310e409b07ee,0000000000000000000000000000000000000000..e07ad01b6635c0d55961f0812f895a6195dc380b
mode 100755,000000..100755
--- /dev/null
@@@ -1,350 -1,0 +1,349 @@@
-   (rc, o, e) = run_command(['ceph', 'mon', 'dump'])
 +#!/usr/bin/env python
 +
 +import subprocess
 +import uuid
 +import re
 +import json
 +import sys
 +import ast
 +import requests
++from collections import Counter
 +
 +CLUSTER_UUID_NAME='cluster-uuid'
 +CLUSTER_OWNERSHIP_NAME='cluster-ownership'
 +
 +def run_command(cmd):
 +  child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
 +                       stderr=subprocess.PIPE)
 +  (o, e) = child.communicate()
 +  return (child.returncode, o, e)
 +
 +def get_uuid():
 +  (rc,uid,e) = run_command(['ceph', 'config-key', 'get', CLUSTER_UUID_NAME])
 +  if rc is not 0:
 +    #uuid is not yet set.
 +    uid = str(uuid.uuid4())
 +    (rc, o, e) = run_command(['ceph', 'config-key', 'put',
 +                             CLUSTER_UUID_NAME, uid])
 +    if rc is not 0:
 +      raise RuntimeError("\'ceph config-key put\' failed -" + e)
 +
 +  return uid
 +
 +def get_cluster_creation_date():
-   rec = re.compile('(.*created\ )(.*)(\n.*)')
-   mo = rec.search(o);
-   if mo and mo.group(2) != '0.000000':
-     return mo.group(2)
++  (rc, o, e) = run_command(['ceph', 'mon', 'dump', '-f', 'json'])
 +  if rc is not 0:
 +    raise RuntimeError("\'ceph mon dump\' failed - " + e)
 +
-   # Try and get the date from osd dump
-   (rc, o, e) = run_command(['ceph', 'osd', 'dump'])
++  oj = json.loads(o)
++  return oj['created']
++
++def bytes_pretty_to_raw(byte_count, byte_scale):
++  if byte_scale == 'kB':
++    return byte_count >> 10
++  if byte_scale == 'MB':
++    return byte_count >> 20
++  if byte_scale == 'GB':
++    return byte_count >> 30
++  if byte_scale == 'TB':
++    return byte_count >> 40
++  if byte_scale == 'PB':
++    return byte_count >> 50
++  if byte_scale == 'EB':
++    return byte_count >> 60
++  
++  return byte_count
 +
-     raise RuntimeError("\'ceph osd dump\' failed - " + e)
++def get_nums():
++  (rc, o, e) = run_command(['ceph', '-s', '-f', 'json'])
 +  if rc is not 0:
-   mo = rec.search(o);
-   if not mo or mo.group(2) == '0.000000':
-     print >> sys.stderr, "Unable to get cluster creation date"
++    raise RuntimeError("\'ceph -s\' failed - " + e)
 +
-   return mo.group(2)
++  oj = json.loads(o)
++  num_mons = len(oj['monmap']['mons'])
++  num_osds = int(oj['osdmap']['osdmap']['num_in_osds'])
++  num_mdss = oj['mdsmap']['in']
 +
- def get_nums():
-   (rc, o, e) = run_command(['ceph', '-s'])
++  pgmap = oj['pgmap']
++  num_pgs = pgmap['num_pgs']
++  num_bytes = pgmap['data_bytes']
 +
-     raise RuntimeError("\'ceph -s\' failed - " + e)
-   num_mons = 0
-   mo = re.search('(.*monmap\ .*:\ )(\d+)(.*)', o)
-   if not mo:
-     raise RuntimeError("Unmatched pattern for monmap in \'ceph status\'")
-   else:
-     num_mons = int(mo.group(2))
-   num_osds = 0
-   mo = re.search('.*osdmap.*(\d+).*(\d+).*(\d+).*', o)
-   if not mo:
-     raise RuntimeError("Unmatched pattern for osdmap in \'ceph status\'")
-   else:
-     num_osds = int(mo.group(1))
-   num_mdss = 0
-   mo = re.search('.*mdsmap\ e\d+.*(\d+)/(\d+)/(\d+).*', o)
-   if mo:
-     num_mdss = int(mo.group(2));
-   num_pgs = 0
-   num_pools = 0
-   num_bytes = 0
++  (rc, o, e) = run_command(['ceph', 'pg', 'dump', 'pools', '-f', 'json-pretty'])
 +  if rc is not 0:
-   mo = re.search('.*pgmap\ v\d+:\ (\d+).*,\ (\d+).*,\ (\d+)\ (\S+)\ data,\ (\d+).*', o)
-   if not mo:
-     raise RuntimeError("Unmatched pattern for pgmap in \'ceph status\'")
-   else:
-     num_pgs = int(mo.group(1))
-     num_pools = int(mo.group(2))
-     byte_count = int(mo.group(3))
-     byte_scale = mo.group(4)
-     num_objs = int(mo.group(5))
-   nums = {'mons':num_mons,
-           'osds':num_osds,
-           'mdss':num_mdss,
-           'pgs':num_pgs,
-           'pools':num_pools,
-           'bytes': {'count':byte_count, 'scale':byte_scale},
-           'objects':num_objs}
++    raise RuntimeError("\'ceph pg dump pools\' failed - " + e)
++ 
++  pools = json.loads(o)
++  num_pools = len(pools)
 +  num_objs = 0
-   crush_types = []
++  for p in pools:
++    num_objs += p['stat_sum']['num_objects']
++
++  nums = {'num_mons':num_mons,
++          'num_osds':num_osds,
++          'num_mdss':num_mdss,
++          'num_pgs':num_pgs,
++          'num_bytes':num_bytes,
++          'num_pools':num_pools,
++          'num_objects':num_objs}
 +  return nums
 +
 +def get_crush_types():
 +  (rc, o, e) = run_command(['ceph', 'osd', 'crush', 'dump'])
 +  if rc is not 0:
 +    raise RuntimeError("\'ceph osd crush dump\' failed - " + e)
 +
 +  crush_dump = json.loads(o)
 +  if crush_dump['types'] is None:
 +    raise RuntimeError("\'types\' item missing in \'ceph osd crush dump\'")
 +
-     crush_types.append(t['name'])
-   return crush_types
++  crush_types = {}
 +  for t in crush_dump['types']:
-   (rc, o, e) = run_command(['ceph', 'osd', 'dump'])
++    crush_types[t['type_id']] = t['name']
++
++  buckets = {}
++  items_list = []
++  for bucket in crush_dump['buckets']:
++    buckets[bucket['id']] = bucket['type_id']
++    for item in bucket['items']:
++      items_list.append(item['id'])
++
++  crush_map = []
++  counter = Counter(items_list)
++  append = lambda t,c: crush_map.append({'type':t, 'count':c})
++  for id,count in counter.items():
++    if id in buckets:
++      append(crush_types[buckets[id]],
++             count)
++      del buckets[id]
++    else:
++      append(crush_types[id], count)
++
++  #the root item
++  for id,type_id in buckets.items():
++    append(crush_types[type_id], 1)
++
++  return crush_map
 +
 +def get_pool_metadata():
-   result = re.findall("pool\ (\d+)\ '(\S+)'\ rep\ size\ (\d+)", o)
-   if len(result) is 0:
-     #Check with replicated size
-     result = re.findall("pool\ (\d+)\ '(\S+)'\ replicated\ size\ (\d+)", o)
-     if len(result) is 0:
-       raise RuntimeError("Unmatched pattern for \'pool\' in \'ceph osd dump\'")
++  (rc, o, e) = run_command(['ceph', 'osd', 'dump', '-f', 'json'])
 +  if rc is not 0:
 +    raise RuntimeError("\'ceph osd dump\' failed - " + e)
 +
-   proc = lambda x: {'id':x[0], 'name':x[1], 'rep_size':int(x[2])}
-   for r in result:
-     pool_meta.append(proc(r))
 +  pool_meta = []
-   sysinfo = []
++  oj = json.loads(o)
++  proc = lambda x: {'id':x['pool'], 'type':x['type'], 'size':x['size']}
++  for p in oj['pools']:
++    pool_meta.append(proc(p))
 +
 +  return pool_meta
 +
 +def get_sysinfo(max_osds):
 +  count = 0
 +  osd_metadata_available = False
-       os_info = {}
-       hw_info = {}
-       nw_info = {}
++  
++  os = {}
++  kern_version = {}
++  kern_description = {}
++  distro = {}
++  cpu = {}
++  arch = {}
++  ceph_version = {}
++
++  incr = lambda a,k: 1 if k not in a else a[k]+1
 +  while count < max_osds:
 +    meta = {'id':count}
 +    (rc, o, e) = run_command(['ceph', 'osd', 'metadata', str(count)])
 +    if rc is 0:
 +      if osd_metadata_available is False:
 +        osd_metadata_available = True
-       meta['ceph_version'] = version[2]
 +
 +      jmeta = json.loads(o)
 +
 +      version = jmeta['ceph_version'].split()
-         meta['ceph_version'] += version[3]
++      cv = version[2]
 +      if (len(version) > 3):
-       os_info['os'] = jmeta['os']
-       os_info['version'] = jmeta['kernel_version']
-       os_info['description'] = jmeta['kernel_description']
++        cv += version[3]
 +
-         distro = jmeta['distro'] + ' '
-         distro += jmeta['distro_version'] + ' '
-         distro += jmeta['distro_codename'] + ' ('
-         distro += jmeta['distro_description'] + ')'
-         os_info['distro'] = distro
++      ceph_version[cv] = incr(ceph_version, cv)
++      os[jmeta['os']] = incr(os, jmeta['os'])
++      kern_version[jmeta['kernel_version']] = \
++            incr(kern_version, jmeta['kernel_version'])
++      kern_description[jmeta['kernel_description']] = \
++            incr(kern_description, jmeta['kernel_description'])
 +
 +      try:
-       meta['os_info'] = os_info
++        dstr = jmeta['distro'] + ' '
++        dstr += jmeta['distro_version'] + ' '
++        dstr += jmeta['distro_codename'] + ' ('
++        dstr += jmeta['distro_description'] + ')'
++        distro[dstr] = incr(distro, dstr)
 +      except KeyError as ke:
 +        pass
-       hw_info['cpu'] = jmeta['cpu']
-       hw_info['arch'] = jmeta['arch']
-       hw_info['mem_kb'] = int(jmeta['mem_total_kb'])
-       hw_info['swap_kb'] = int(jmeta['mem_swap_kb'])
-       meta['hw_info'] = hw_info
 +  
-     (ip, hname) = get_osd_host(count)
-     nw_info['address'] = ip
-     nw_info['hostname'] = hname
-     meta['nw_info'] = nw_info
-     sysinfo.append(meta)
++      cpu[jmeta['cpu']] = incr(cpu, jmeta['cpu'])
++      arch[jmeta['arch']] = incr(arch, jmeta['arch'])
 +  
 +    count = count + 1
 +
++  sysinfo = {}
 +  if osd_metadata_available is False:
 +    print >> sys.stderr, "'ceph osd metadata' is not available at all"
- def get_osd_host(osd_id):
-   loc = {}
-   (rc, o, e) = run_command(['ceph', 'osd', 'find', str(osd_id)])
-   if rc is not 0:
-     raise RuntimeError("\'ceph osd find\' failed - " + e)
-   jloc = json.loads(o)
-   mo = re.search("(\d+.\d+.\d+.\d+).*", jloc['ip'])
-   if mo is None:
-     #Might be in ipv6 format, TODO: Verify
-     return None;
-   ip = mo.group(1)
-   host = jloc['crush_location']['host']
-   return (ip, host)
++    return sysinfo
++
++  def jsonify(type_count, name, type_name):
++    tmp = []
++    for k, v in type_count.items():
++      tmp.append({type_name:k, 'count':v})
++    sysinfo[name] = tmp
++
++  jsonify(os, 'os_info', 'os')
++  jsonify(kern_version, 'kernel_versions', 'version')
++  jsonify(kern_description, 'kernel_types', 'type')
++  jsonify(distro, 'distros', 'distro')
++  jsonify(cpu, 'cpus', 'cpu')
++  jsonify(arch, 'cpu_archs', 'arch')
++  jsonify(ceph_version, 'ceph_versions', 'version')
 +  return sysinfo
 +
-   num_osds = int(nums['osds'])
 +def get_ownership_info():
 +  (rc, o, e) = run_command(['ceph', 'config-key', 'get',
 +                            CLUSTER_OWNERSHIP_NAME])
 +  if rc is not 0:
 +    return {}
 +
 +  return ast.literal_eval(o)
 +
 +def output_json():
 +  out = {}
 +  url = None
 +  
 +  out['uuid'] = get_uuid()
 +  out['cluster_creation_date'] = get_cluster_creation_date()
 +  nums = get_nums()
++  num_osds = int(nums['num_osds'])
 +  out['components_count'] = nums
 +  out['crush_types'] = get_crush_types()
 +  out['pool_metadata'] = get_pool_metadata()
 +  out['sysinfo'] = get_sysinfo(num_osds)
 +
 +  owner = get_ownership_info()
 +  if owner is not None:
 +    out['ownership'] = owner
 +    if 'url' in owner:
 +      url = owner.pop('url')
 +
 +  return json.dumps(out, indent=2, separators=(',', ': ')), url
 +
 +def describe_usage():
 +  print >> sys.stderr, "Usage:"
 +  print >> sys.stderr, "======\n"
 +
 +  print >> sys.stderr, sys.argv[0] + " <commands> [command-options]\n"
 +  print >> sys.stderr, "commands:"
 +  print >> sys.stderr, "publish - publish the brag report to the server"
 +  print >> sys.stderr, "update-metadata <update-metadata-options> - Update"
 +  print >> sys.stderr, "         ownership information for bragging"
 +  print >> sys.stderr, "clear-metadata - Clear information set by update-metadata"
 +  print >> sys.stderr, "unpublish --yes-i-am-shy - delete the brag report from the server"
 +  print >> sys.stderr, ""
 +
 +  print >> sys.stderr, "update-metadata options:"
 +  print >> sys.stderr, "--name=  - Name of the cluster"
 +  print >> sys.stderr, "--organization= - Name of the organization"
 +  print >> sys.stderr, "--email= - Email contact address"
 +  print >> sys.stderr, "--description= - Reporting use-case"
 +  print >> sys.stderr, "--url= - The URL that is used to publish and unpublish"
 +  print >> sys.stderr, ""
 +
 +def update_metadata():
 +  info = {}
 +  possibles = ['name', 'organization', 'email', 'description', 'url']
 +
 +  #get the existing values
 +  info = get_ownership_info();
 +
 +  for index in range(2, len(sys.argv)):
 +    mo = re.search("--(\S+)=(.*)", sys.argv[index])
 +    if not mo:
 +      describe_usage()
 +      return 22
 +
 +    k = mo.group(1)
 +    v = mo.group(2)
 +
 +    if k in possibles:
 +      info[k] = v
 +    else:
 +      print >> sys.stderr, "Unexpect option --" + k
 +      describe_usage()
 +      return 22
 +
 +  (rc, o, e) = run_command(['ceph', 'config-key', 'put',
 +                            CLUSTER_OWNERSHIP_NAME, str(info)])
 +  return rc
 +
 +def clear_metadata():
 +  (rc, o, e) = run_command(['ceph', 'config-key', 'del',
 +                            CLUSTER_OWNERSHIP_NAME])
 +  return rc
 +
 +def publish():
 +  data, url = output_json()
 +  if url is None:
 +    print >> sys.stderr, "Cannot publish until a URL is set using update-metadata"
 +    return 1
 +
 +  req = requests.put(url, data=data)
 +  if req.status_code is not 201:
 +    print >> sys.stderr, "Failed to publish, server responded with code " + str(req.status_code)
 +    print >> sys.stderr, req.text
 +    return 1
 +
 +  return 0
 +
 +def unpublish():
 +  if len(sys.argv) <= 2 or sys.argv[2] != '--yes-i-am-shy':
 +    print >> sys.stderr, "unpublish should be followed by --yes-i-am-shy"
 +    return 22
 +
 +  fail = False
 +  owner = get_ownership_info()
 +  if owner is None:
 +    fail = True
 +  try:
 +    url = owner['url']
 +  except KeyError as e:
 +    fail = True
 +
 +  if fail:
 +    print >> sys.stderr, "URL is not updated yet"
 +    return 1
 +
 +  uuid = get_uuid()
 +  
 +  params = {'uuid':uuid}
 +  req = requests.delete(url, params=params)
 +  if req.status_code is not 200:
 +    print >> sys.stderr, "Failed to unpublish, server responsed with code " + str(req.status_code)
 +    return 1 
 +
 +  return 0
 +
 +def main():
 +  if len(sys.argv) is 1:
 +    print output_json()[0]
 +    return 0
 +  elif sys.argv[1] == 'update-metadata':
 +    return update_metadata()
 +  elif sys.argv[1] == 'clear-metadata':
 +    return clear_metadata()
 +  elif sys.argv[1] == 'publish':
 +    return publish()
 +  elif sys.argv[1] == 'unpublish':
 +    return unpublish()
 +  else:
 +    describe_usage()
 +    return 22
 +
 +if __name__ == '__main__':
 +  sys.exit(main())
diff --combined src/brag/server/ceph_brag/json.py
index 856ea4eaf8946e42251798ef0b41ced5d6cfed79,0000000000000000000000000000000000000000..bc4670287f2e85861074d02d6e327b537536944c
mode 100644,000000..100644
--- /dev/null
@@@ -1,67 -1,0 +1,114 @@@
-             bytes={'count':comps.byte_count, 'scale':comps.byte_scale},
-             osds=comps.num_osds,
-             objects=comps.num_objects,
-             pgs=comps.num_pgs,
-             pools=comps.num_pools,
-             mdss=comps.num_mdss,
-             mons=comps.num_mons
-             )
 +from pecan.jsonify import jsonify
 +from ceph_brag.model import db 
 +
 +@jsonify.register(db.version_info)
 +def jsonify_version(vi):
 +    return dict(
 +            version_number=vi.version_number,
 +            version_date=str(vi.version_date)
 +            )
 +
 +@jsonify.register(db.cluster_info)
 +def jsonify_cluster_info(ci):
 +    return dict(
 +              uuid=ci.uuid,
 +              organization=ci.organization,
 +              email=ci.contact_email,
 +              cluster_name=ci.cluster_name,
 +              cluster_creation_date=str(ci.cluster_creation_date),
 +              num_versions=ci.num_versions
 +              )
 +
 +@jsonify.register(db.components_info)
 +def jsonify_components_info(comps):
 +    return dict(
-     return dict(rep_size=pool.pool_rep_size,
-                 name=pool.pool_name,
-                 id=pool.pool_id
-                )
++            num_bytes=comps.num_bytes,
++            num_osds=comps.num_osds,
++            num_objects=comps.num_objects,
++            num_pgs=comps.num_pgs,
++            num_pools=comps.num_pools,
++            num_mdss=comps.num_mdss,
++            num_mons=comps.num_mons)
++
++@jsonify.register(db.crush_types)
++def jsonify_crush_types(crush):
++    return dict(type=crush.crush_type,
++                count=crush.crush_count)
 +
 +@jsonify.register(db.pools_info)
 +def jsonify_pools_info(pool):
- @jsonify.register(db.osds_info)
- def jsonify_osds_info(osd):
-     return dict(nw_info={'address':osd.nw_address,'hostname':osd.hostname},
-                 hw_info={'swap_kb':osd.swap_kb,'mem_kb':osd.mem_kb,
-                          'arch':osd.arch, 'cpu':osd.cpu},
-                 id=osd.osd_id,
-                 os_info={'os':osd.os,'version':osd.os_version,
-                          'description':osd.os_desc, 'distro':osd.distro},
-                 ceph_version=osd.ceph_version
-                )
++    return dict(size=pool.pool_rep_size,
++                type=pool.pool_type,
++                id=pool.pool_id)
++
++@jsonify.register(db.os_info)
++def jsonify_os_info(value):
++    return dict(os=value.os,
++                count=value.count)
++
++@jsonify.register(db.kernel_versions)
++def jsonify_kernel_versions(value):
++    return dict(version=value.version,
++                count=value.count)
++
++@jsonify.register(db.kernel_types)
++def jsonify_kernel_types(value):
++    return dict(type=value.type,
++                count=value.count)
++
++@jsonify.register(db.distros)
++def jsonify_distros(value):
++    return dict(distro=value.distro,
++                count=value.count)
++
++@jsonify.register(db.cpus)
++def jsonify_cpus(value):
++    return dict(cpu=value.cpu,
++                count=value.count)
++
++@jsonify.register(db.cpu_archs)
++def jsonify_cpu_archs(value):
++    return dict(arch=value.arch,
++                count=value.count)
++
++@jsonify.register(db.ceph_versions)
++def jsonify_ceph_versions(value):
++    return dict(version=value.version,
++                count=value.count)
++
++@jsonify.register(db.sysinfo)
++def jsonify_sysinfo(value):
++    retval = {}
++    
++    if value.os:
++      retval['os_info'] = value.os
++    if value.kern_vers:
++      retval['kernel_versions'] = value.kern_vers
++    if value.kern_types:
++      retval['kernel_types'] = value.kern_types
++    if value.distros:
++      retval['distros'] = value.distros
++    if value.cpus:
++      retval['cpus'] = value.cpus
++    if value.cpu_archs:
++      retval['cpu_archs'] = value.cpu_archs
++    if value.ceph_vers:
++      retval['ceph_versions'] = value.ceph_vers
 +
-     crush_types=b.comps.crush_types.split(',')
++    return retval
 +
 +@jsonify.register(db.brag)
 +def jsonify_brag(b):
 +    ownership = {'organization':b.ci.organization,
 +                 'description':b.ci.description,
 +                 'email':b.ci.contact_email,
 +                 'name':b.ci.cluster_name
 +                } 
-                 crush_types=crush_types,
 +    return dict(uuid=b.ci.uuid,
 +                cluster_creation_date=str(b.ci.cluster_creation_date),
 +                components_count=b.comps,
-                 sysinfo=b.osds
++                crush_types=b.crush,
 +                ownership=ownership,
 +                pool_metadata=b.pools,
++                sysinfo=b.sysinfo
 +                )
diff --combined src/brag/server/ceph_brag/model/db.py
index 974b37ce255037669dba64ae685f326cc314026d,0000000000000000000000000000000000000000..94d98ffc04d2f6e1c18892ef56e48ba3084d9428
mode 100644,000000..100644
--- /dev/null
@@@ -1,191 -1,0 +1,282 @@@
- from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
 +import json
 +from datetime import datetime
 +from sqlalchemy.orm import sessionmaker, scoped_session
-   byte_count = Column(Integer)
-   byte_scale = Column(String(8))
++from sqlalchemy import Column, Integer, String, \
++     DateTime, ForeignKey, BigInteger
 +from sqlalchemy import PrimaryKeyConstraint
 +from sqlalchemy.ext.declarative import declarative_base
 +from sqlalchemy.ext.declarative import declared_attr
 +
 +Base = declarative_base()
 +Session = scoped_session(sessionmaker())
 +
 +class cluster_info(Base):
 +  __tablename__ = 'cluster_info'
 +
 +  index = Column(Integer, primary_key=True)
 +  uuid = Column(String(36), unique=True)
 +  organization = Column(String(64))
 +  contact_email = Column(String(32))
 +  cluster_name = Column(String(32))
 +  cluster_creation_date = Column(DateTime)
 +  description = Column(String(32))
 +  num_versions = Column(Integer)
 +
 +class version_info(Base):
 +  __tablename__ = 'version_info'
 +
 +  index = Column(Integer, primary_key=True)
 +  cluster_id = Column(ForeignKey('cluster_info.index'))
 +  version_number = Column(Integer)
 +  version_date = Column(DateTime)
 +
 +class components_info(Base):
 +  __tablename__ = 'components_info'
 +
 +  index = Column(Integer, primary_key=True)
 +  vid = Column(ForeignKey('version_info.index'))
-   crush_types = Column(String(256))
++  num_bytes = Column(BigInteger)
 +  num_osds = Column(Integer)
 +  num_objects = Column(Integer)
 +  num_pgs = Column(Integer)
 +  num_pools = Column(Integer)
 +  num_mdss = Column(Integer)
 +  num_mons = Column(Integer)
-   pool_id = Column(String(8))
-   pool_name = Column(String(16))
++
++class crush_types(Base):
++  __tablename__ = 'crush_types'
++
++  index = Column(Integer, primary_key=True)
++  vid = Column(ForeignKey('version_info.index'))
++  crush_type = Column(String(16))
++  crush_count = Column(Integer)
 +
 +class pools_info(Base):
 +  __tablename__ = 'pools_info'
 +
 +  index = Column(Integer, primary_key=True)
 +  vid = Column(ForeignKey('version_info.index'))
- class osds_info(Base):
-   __tablename__ = 'osds_info'
++  pool_id = Column(Integer)
++  pool_type = Column(Integer)
 +  pool_rep_size = Column(Integer)
 +
-   osd_id = Column(String(8))
-   nw_address = Column(String(16))
-   hostname = Column(String(16))
-   swap_kb = Column(Integer)
-   mem_kb = Column(Integer)
-   arch = Column(String(16))
-   cpu = Column(String(16))
++class os_info(Base):
++  __tablename__ = 'os_info'
 +
 +  index = Column(Integer, primary_key=True)
 +  vid = Column(ForeignKey('version_info.index'))
-   os_version = Column(String(16))
-   os_desc = Column(String(64))
 +  os = Column(String(16))
-   ceph_version = Column(String(64))
++  count = Column(Integer)
++
++class kernel_versions(Base):
++  __tablename__ = 'kernel_versions'
++
++  index = Column(Integer, primary_key=True)
++  vid = Column(ForeignKey('version_info.index'))
++  version = Column(String(16))
++  count = Column(Integer)
++
++class kernel_types(Base):
++  __tablename__ = 'kernel_types'
++
++  index = Column(Integer, primary_key=True)
++  vid = Column(ForeignKey('version_info.index'))
++  type = Column(String(64))
++  count = Column(Integer)
++
++class distros(Base):
++  __tablename__ = 'distros'
++
++  index = Column(Integer, primary_key=True)
++  vid = Column(ForeignKey('version_info.index'))
 +  distro = Column(String(64))
-       self.osds = Session.query(osds_info).filter_by(vid=self.vi.index).all()
++  count = Column(Integer)
++
++class cpus(Base):
++  __tablename__ = 'cpus'
++
++  index = Column(Integer, primary_key=True)
++  vid = Column(ForeignKey('version_info.index'))
++  cpu = Column(String(16))
++  count = Column(Integer)
++
++class cpu_archs(Base):
++  __tablename__ = 'cpu_archs'
++
++  index = Column(Integer, primary_key=True)
++  vid = Column(ForeignKey('version_info.index'))
++  arch = Column(String(16))
++  count = Column(Integer)
++
++class ceph_versions(Base):
++  __tablename__ = 'ceph_versions'
++
++  index = Column(Integer, primary_key=True)
++  vid = Column(ForeignKey('version_info.index'))
++  version = Column(String(16))
++  count = Column(Integer)
++
++class sysinfo(object):
++  def __init__(self, vindex):
++    self.os = Session.query(os_info).filter_by(vid=vindex).all()
++    self.kern_vers = Session.query(kernel_versions).filter_by(vid=vindex).all()
++    self.kern_types = Session.query(kernel_types).filter_by(vid=vindex).all()
++    self.distros = Session.query(distros).filter_by(vid=vindex).all()
++    self.cpus = Session.query(cpus).filter_by(vid=vindex).all()
++    self.cpu_archs = Session.query(cpu_archs).filter_by(vid=vindex).all()
++    self.ceph_vers = Session.query(ceph_versions).filter_by(vid=vindex).all()
 +
 +class brag(object):
 +  def __init__(self, uuid, version_number):
 +    self.ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
 +    if self.ci is not None:
 +      self.vi = Session.query(version_info).filter_by(cluster_id=self.ci.index, version_number=version_number).first()
 +    
 +    if self.ci is not None and self.vi is not None:
 +      self.comps = Session.query(components_info).filter_by(vid=self.vi.index).first()
++      self.crush = Session.query(crush_types).filter_by(vid=self.vi.index).all()
 +      self.pools = Session.query(pools_info).filter_by(vid=self.vi.index).all()
-                          byte_count=comps_count['bytes']['count'],
-                          byte_scale=comps_count['bytes']['scale'],
-                          num_osds=comps_count['osds'],
-                          num_objects=comps_count['objects'],
-                          num_pgs=comps_count['pgs'],
-                          num_pools=comps_count['pools'],
-                          num_mdss=comps_count['mdss'],
-                          num_mons=comps_count['mons'],
-                          crush_types=','.join(info['crush_types']))
++      self.sysinfo = sysinfo(self.vi.index)
 +
 +def put_new_version(data):
 +  info = json.loads(data)
 +  def add_cluster_info():
 +    ci = Session.query(cluster_info).filter_by(uuid=info['uuid']).first()
 +    if ci is None:
 +      dt = datetime.strptime(info['cluster_creation_date'], "%Y-%m-%d %H:%M:%S.%f")
 +      ci = cluster_info(uuid=info['uuid'], 
 +                        organization=info['ownership']['organization'],
 +                        contact_email=info['ownership']['email'],
 +                        cluster_name=info['ownership']['name'],
 +                        description=info['ownership']['description'],
 +                        cluster_creation_date=dt,
 +                        num_versions=1)
 +      Session.add(ci)
 +      Session.commit()
 +    else:
 +      ci.num_versions += 1 
 +
 +    return ci
 + 
 +  def add_version_info(ci):
 +    vi = version_info(cluster_id=ci.index, 
 +                      version_number=ci.num_versions,
 +                      version_date=datetime.now())
 +    Session.add(vi)
 +    return vi
 +
 +  def add_components_info(vi):
 +    comps_count= info['components_count']
 +    comps_info = components_info(vid=vi.index,
-                              pool_name=p['name'],
-                              pool_rep_size=p['rep_size']))
-   def add_osds_info(vi):
-     osds = info['sysinfo']
-     for o in osds:
-       osd = osds_info(vid=vi.index,
-                       osd_id=o['id'],
-                       nw_address=o['nw_info']['address'],
-                       hostname=o['nw_info']['hostname'],
-                       swap_kb=o['hw_info']['swap_kb'],
-                       mem_kb=o['hw_info']['mem_kb'],
-                       arch=o['hw_info']['arch'],
-                       cpu=o['hw_info']['cpu'],
-                       os=o['os_info']['os'],
-                       os_version=o['os_info']['version'],
-                       os_desc=o['os_info']['description'],
-                       distro=o['os_info']['distro'],
-                       ceph_version=o['ceph_version'])
-       Session.add(osd)
-                     
++                         num_bytes=comps_count['num_bytes'],
++                         num_osds=comps_count['num_osds'],
++                         num_objects=comps_count['num_objects'],
++                         num_pgs=comps_count['num_pgs'],
++                         num_pools=comps_count['num_pools'],
++                         num_mdss=comps_count['num_mdss'],
++                         num_mons=comps_count['num_mons'])
 +    Session.add(comps_info)
 +
++  def add_crush_types(vi):
++    for c in info['crush_types']:
++      Session.add(crush_types(vid=vi.index, 
++                            crush_type=c['type'],
++                            crush_count=c['count']))
++
 +  def add_pools_info(vi):
 +    pools = info['pool_metadata']
 +    for p in pools:
 +      Session.add(pools_info(vid=vi.index,
 +                             pool_id=p['id'],
-   add_osds_info(vi)
++                             pool_type=p['type'],
++                             pool_rep_size=p['size']))
++
++  def add_sys_info(vi):
++    si = info['sysinfo']
++    while si:
++      k,v = si.popitem()
++      if k == 'os_info':
++        for o in v:
++          Session.add(os_info(vid=vi.index, 
++                              os=o['os'],
++                              count=o['count']))
++      elif k == 'kernel_versions':
++        for k in v:
++          Session.add(kernel_versions(vid=vi.index,
++                                      version=k['version'],
++                                      count=k['count']))
++      elif k == 'kernel_types':
++        for k in v:
++          Session.add(kernel_types(vid=vi.index,
++                                   type=k['type'],
++                                   count=k['count']))
++      elif k == 'distros':
++        for d in v:
++          Session.add(distros(vid=vi.index,
++                              distro=d['distro'],
++                              count=d['count']))
++      elif k == 'cpus':
++        for c in v:
++          Session.add(cpus(vid=vi.index,
++                           cpu=c['cpu'],
++                           count=c['count']))
++      elif k == 'cpu_archs':
++        for c in v:
++          Session.add(cpu_archs(vid=vi.index,
++                                arch=c['arch'],
++                                count=c['count']))
++      elif k == 'ceph_versions':
++        for c in v:
++          Session.add(ceph_versions(vid=vi.index,
++                                    version=c['version'],
++                                    count=c['count']))
++
 +  ci = add_cluster_info()
 +  add_version_info(ci)
 +  vi = Session.query(version_info).filter_by(cluster_id=ci.index, 
 +                                             version_number=ci.num_versions).first()
 +  add_components_info(vi)
++  add_crush_types(vi)
 +  add_pools_info(vi)
-     Session.query(osds_info).filter_by(vid=v.index).delete()
++  add_sys_info(vi)
 + 
 +def delete_uuid(uuid):
 +  ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
 +  if ci is None:
 +    return {'status':400, 'msg':'No information for this UUID'}
 +
 +  for v in Session.query(version_info).filter_by(cluster_id=ci.index).all():
 +    Session.query(components_info).filter_by(vid=v.index).delete()
++    Session.query(crush_types).filter_by(vid=v.index).delete()
 +    Session.query(pools_info).filter_by(vid=v.index).delete()
++    Session.query(os_info).filter_by(vid=v.index).delete()
++    Session.query(kernel_versions).filter_by(vid=v.index).delete()
++    Session.query(kernel_types).filter_by(vid=v.index).delete()
++    Session.query(distros).filter_by(vid=v.index).delete()
++    Session.query(cpus).filter_by(vid=v.index).delete()
++    Session.query(cpu_archs).filter_by(vid=v.index).delete()
++    Session.query(ceph_versions).filter_by(vid=v.index).delete()
++
 +    Session.flush()
 +    Session.delete(v)
 +    Session.flush()
 +
 +  Session.delete(ci)
 +  return None
 +
 +def get_uuids():
 +  return Session.query(cluster_info).all()
 +
 +def get_versions(uuid):
 +  ci = Session.query(cluster_info).filter_by(uuid=uuid).first()
 +  if ci is None:
 +    return None
 +
 +  return Session.query(version_info).filter_by(cluster_id=ci.index).all()
 +
 +def get_brag(uuid, version_id):
 +  b = brag(uuid, version_id)
 +  if b.ci is None or b.vi is None:
 +    return None
 +
 +  return b
diff --combined src/brag/server/sample.json
index d51fe59f7f95643db6549d4b083fa094b105dc23,0000000000000000000000000000000000000000..194ec637be905a84409415dab9d893e37f4c24e2
mode 100644,000000..100644
--- /dev/null
@@@ -1,74 -1,0 +1,98 @@@
-       "cluster_creation_date": "2014-01-16 13:38:41.928551",
-       "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
-       "components_count": {
-         "bytes": {
-           "count": 0,
-           "scale": "bytes"
-         },
-         "osds": 1,
-         "objects": 0,
-         "pgs": 192,
-         "pools": 3,
-         "mdss": 1,
-         "mons": 1
-       },
-       "crush_types": [
-         "osd",
-         "host",
-         "chassis",
-         "rack",
-         "row",
-         "pdu",
-         "pod",
-         "room",
-         "datacenter",
-         "region",
-         "root"
-       ],
-       "ownership": {
-         "organization": "eNovance",
-         "description": "Use case1",
-         "email": "mail@enovance.com",
-         "name": "Cluster1"
-       },
-       "pool_metadata": [
-         {
-           "rep_size": 3,
-           "id": "0",
-           "name": "data"
-         },
-         {
-           "rep_size": 3,
-           "id": "1",
-           "name": "metadata"
-         },
-         {
-           "rep_size": 3,
-           "id": "2",
-           "name": "rbd"
-         }
-       ],
-       "sysinfo": [
-         {
-           "nw_info": {
-             "hostname": "ceph-brag",
-             "address": "127.0.0.1"
-           },
-           "hw_info": {
-             "swap_kb": 0,
-             "arch": "x86_64",
-             "cpu": "Intel Xeon E312xx (Sandy Bridge)",
-             "mem_kb": 2051648
-           },
-           "id": 0,
-           "os_info": {
-             "version": "3.2.0-23-virtual",
-             "os": "Linux",
-             "description": "#36-Ubuntu SMP Tue Apr 10 22:29:03 UTC 2012",
-             "distro": "Ubuntu 12.04 precise (Ubuntu 12.04 LTS)"
-           },
-           "ceph_version": "ceph version 0.75-229-g4050eae (4050eae32cd77a1c210ca11d0f12c74daecb1bd3)"
-         }
-       ]
 +{
++  "cluster_creation_date": "2014-01-16 13:38:41.928551",
++  "uuid": "20679d0e-04b1-4004-8ee9-45ac271510e9",
++  "components_count": {
++    "num_pgs": 192,
++    "num_mdss": 1,
++    "num_osds": 1,
++    "num_bytes": 0,
++    "num_pools": 3,
++    "num_mons": 1,
++    "num_objects": 0
++  },
++  "crush_types": [
++    {
++      "count": 1,
++      "type": "osd"
++    },
++    {
++      "count": 1,
++      "type": "rack"
++    },
++    {
++      "count": 1,
++      "type": "host"
++    },
++    {
++      "count": 1,
++      "type": "root"
 +    }
++  ],
++  "ownership": {
++    "organization": "eNovance",
++    "description": "Use case1",
++    "name": "Cluster1",
++    "email": "mail@enovance.com"
++  },
++  "pool_metadata": [
++    {
++      "type": 1,
++      "id": 0,
++      "size": 3
++    },
++    {
++      "type": 1,
++      "id": 1,
++      "size": 3
++    },
++    {
++      "type": 1,
++      "id": 2,
++      "size": 3
++    }
++  ],
++  "sysinfo": {
++    "kernel_types": [
++      {
++        "count": 1,
++        "type": "#36-Ubuntu SMP Tue Apr 10 22:29:03 UTC 2012"
++      }
++    ],
++    "cpu_archs": [
++      {
++        "count": 1,
++        "arch": "x86_64"
++      }
++    ],
++    "cpus": [
++      {
++        "count": 1,
++        "cpu": "Intel Xeon E312xx (Sandy Bridge)"
++      }
++    ],
++    "kernel_versions": [
++      {
++        "count": 1,
++        "version": "3.2.0-23-virtual"
++      }
++    ],
++    "ceph_versions": [
++      {
++        "count": 1,
++        "version": "0.75-229-g4050eae(4050eae32cd77a1c210ca11d0f12c74daecb1bd3)"
++      }
++    ],
++    "os_info": [
++      {
++        "count": 1,
++        "os": "Linux"
++      }
++    ],
++    "distros": [
++      {
++        "count": 1,
++        "distro": "Ubuntu 12.04 precise (Ubuntu 12.04 LTS)"
++      }
++    ]
++  }
++}