diff --git a/kvmagent/kvmagent/plugins/surfs_plugin.py b/kvmagent/kvmagent/plugins/surfs_plugin.py
new file mode 100755
index 0000000000..45a3734f36
--- /dev/null
+++ b/kvmagent/kvmagent/plugins/surfs_plugin.py
@@ -0,0 +1,213 @@
+from kvmagent import kvmagent
+from kvmagent.plugins import vm_plugin
+from zstacklib.utils import jsonobject
+from zstacklib.utils import http
+from zstacklib.utils import log
+from zstacklib.utils import shell
+from zstacklib.utils import lichbd
+from zstacklib.utils import sizeunit
+from zstacklib.utils import linux
+from zstacklib.utils import thread
+import zstacklib.utils.lichbd_factory as lichbdfactory
+import os.path
+import re
+import threading
+import time
+import traceback
+
+logger = log.get_logger(__name__)
+
+class AgentRsp(object):
+ def __init__(self):
+ self.success = True
+ self.error = None
+
+class ScanRsp(object):
+ def __init__(self):
+ super(ScanRsp, self).__init__()
+ self.result = None
+
+# kill all VMs running on this host
+def kill_vm(maxAttempts):
+ vm_uuid_list = shell.call("virsh list | grep running | awk '{print $2}'")
+ for vm_uuid in vm_uuid_list.split('\n'):
+ vm_uuid = vm_uuid.strip(' \t\n\r')
+ if not vm_uuid:
+ continue
+
+ vm_pid = shell.call("ps aux | grep qemu-kvm | grep -v grep | awk '/%s/{print $2}'" % vm_uuid)
+ vm_pid = vm_pid.strip(' \t\n\r')
+ kill = shell.ShellCmd('kill -9 %s' % vm_pid)
+ kill(False)
+ if kill.return_code == 0:
+ logger.warn('kill the vm[uuid:%s, pid:%s] because we lost connection to the surfs storage;'
+ ' failed to read the heartbeat file %s times' % (vm_uuid, vm_pid, maxAttempts))
+ else:
+ logger.warn('failed to kill the vm[uuid:%s, pid:%s] %s' % (vm_uuid, vm_pid, kill.stderr))
+
+
+
+class SurfsPlugin(kvmagent.KvmAgent):
+ '''
+ KVM agent plugin providing self-fencing and host scanning for SURFS storage.
+ '''
+
+ SCAN_HOST_PATH = "/ha/scanhost"
+ SETUP_SELF_FENCER_PATH = "/ha/selffencer/setup"
+ SURFS_SELF_FENCER = "/ha/surfs/setupselffencer"
+
+ RET_SUCCESS = "success"
+ RET_FAILURE = "failure"
+ RET_NOT_STABLE = "unstable"
+
+ @kvmagent.replyerror
+ def setup_surfs_self_fencer(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+
+ @thread.AsyncThread
+ def heartbeat_on_surfsstor():
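+ # heartbeat loop: every cmd.interval seconds try to create a tiny (1b) heartbeat volume on surfs;
+ # if it already exists, read it instead. After cmd.maxAttempts consecutive failures, kill all local VMs.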
+ try:
+ failure = 0
+
+ while True:
+ time.sleep(cmd.interval)
+
+ heartbeatImage=cmd.heartbeatImagePath.split('/')[1]
+ create = shell.ShellCmd('timeout %s surfs --pretty create -V 1b %s ' %
+ (cmd.storageCheckerTimeout,heartbeatImage))
+ create(False)
+ create_stdout = create.stdout
+ read_heart_beat_file = False
+
+ if create.return_code == 0 and "true" in create.stdout:
+ failure = 0
+ continue
+ elif "false" in create_stdout and "volume already exists" in create_stdout:
+ read_heart_beat_file = True
+ else:
+ # will cause failure count +1
+ logger.warn('cannot create heartbeat file; %s' % create.stdout)
+ if read_heart_beat_file:
+ #shell: surfs --pretty volume mdvol4
+ touch = shell.ShellCmd('timeout %s surfs --pretty volume %s' %
+ (cmd.storageCheckerTimeout, heartbeatImage))
+ touch(False)
+ if "true" in touch.stdout:
+ failure = 0
+ continue
+ failure += 1
+ if failure == cmd.maxAttempts:
+ kill_vm(cmd.maxAttempts)
+ # reset the failure count
+ failure = 0
+ except:
+ content = traceback.format_exc()
+ logger.warn(content)
+
+ heartbeat_on_surfsstor()
+
+ return jsonobject.dumps(AgentRsp())
+
+ @kvmagent.replyerror
+ def setup_self_fencer(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ @thread.AsyncThread
+ def heartbeat_file_fencer(heartbeat_file_path):
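+ # touch the per-host heartbeat file every cmd.interval seconds; after cmd.maxAttempts
+ # consecutive failures, kill all local VMs.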
+ try:
+ failure = 0
+
+ while True:
+ time.sleep(cmd.interval)
+
+ touch = shell.ShellCmd('timeout %s touch %s; exit $?' % (cmd.storageCheckerTimeout, heartbeat_file_path))
+ touch(False)
+ if touch.return_code == 0:
+ failure = 0
+ continue
+
+ logger.warn('unable to touch %s, %s %s' % (heartbeat_file_path, touch.stderr, touch.stdout))
+ failure += 1
+
+ if failure == cmd.maxAttempts:
+ logger.warn('failed to touch the heartbeat file[%s] %s times, we lost the connection to the storage,'
+ 'shutdown ourselves' % (heartbeat_file_path, cmd.maxAttempts))
+ kill_vm(cmd.maxAttempts)
+ except:
+ content = traceback.format_exc()
+ logger.warn(content)
+
+
+ for mount_point in cmd.mountPoints:
+ if not os.path.isdir(mount_point):
+ raise Exception('the mount point[%s] is not a directory' % mount_point)
+
+ hb_file = os.path.join(mount_point, 'heartbeat-file-kvm-host-%s.hb' % cmd.hostUuid)
+ heartbeat_file_fencer(hb_file)
+
+ return jsonobject.dumps(AgentRsp())
+
+
+ @kvmagent.replyerror
+ def scan_host(self, req):
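+ # probe cmd.ip with nmap ping scans cmd.times times: report success when exactly cmd.successTimes probes
+ # succeed, failure when none succeed; otherwise re-probe cmd.successTimes times and report success or unstable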
+ rsp = ScanRsp()
+
+ success = 0
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ for i in range(0, cmd.times):
+ if shell.run("nmap -sP -PI %s | grep 'Host is up'" % cmd.ip) == 0:
+ success += 1
+
+ time.sleep(cmd.interval)
+
+ if success == cmd.successTimes:
+ rsp.result = self.RET_SUCCESS
+ return jsonobject.dumps(rsp)
+
+ if success == 0:
+ rsp.result = self.RET_FAILURE
+ return jsonobject.dumps(rsp)
+
+ success = 0
+ for i in range(0, cmd.successTimes):
+ if shell.run("nmap -sP -PI %s | grep 'Host is up'" % cmd.ip) == 0:
+ success += 1
+
+ time.sleep(cmd.successInterval)
+
+ if success == cmd.successTimes:
+ rsp.result = self.RET_SUCCESS
+ return jsonobject.dumps(rsp)
+
+ rsp.result = self.RET_NOT_STABLE
+ return jsonobject.dumps(rsp)
+
+ @kvmagent.replyerror
+ def fusionstor_query(self, req):
+ protocol = lichbd.get_protocol()
+ if protocol == 'lichbd':
+ lichbd.makesure_qemu_img_with_lichbd()
+ elif protocol == 'sheepdog' or protocol == 'nbd':
+ pass
+ else:
+ raise shell.ShellError('unsupported protocol[%s]; only lichbd, sheepdog and nbd are supported' % protocol)
+
+ o = shell.call('lich.node --stat 2>/dev/null')
+ if 'running' not in o:
+ raise shell.ShellError('the lichd process on this node is not running; please check the lichd service')
+
+ return jsonobject.dumps(kvmagent.AgentResponse())
+
+ def start(self):
+ self.host_uuid = None
+
+ http_server = kvmagent.get_http_server()
+ http_server.register_async_uri(self.SCAN_HOST_PATH, self.scan_host)
+ http_server.register_async_uri(self.SETUP_SELF_FENCER_PATH, self.setup_self_fencer)
+ http_server.register_async_uri(self.SURFS_SELF_FENCER, self.setup_surfs_self_fencer)
+
+ def stop(self):
+ pass
+
+ def configure(self, config):
+ self.config = config
+
diff --git a/kvmagent/kvmagent/plugins/vm_plugin.py b/kvmagent/kvmagent/plugins/vm_plugin.py
index 65693f37a9..3c484a8b3e 100644
--- a/kvmagent/kvmagent/plugins/vm_plugin.py
+++ b/kvmagent/kvmagent/plugins/vm_plugin.py
@@ -1587,10 +1587,52 @@ def virtio_scsi_fusionstor():
else:
return blk_fusionstor()
+ def get_solt_index(domainxml):
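+ # scan the domain XML for slot='0xNN' attributes, track the highest slot number and
+ # return the next free slot as a hex string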
+ if domainxml is None:
+ return None
+ ix=0
+ max_index=0
+ txml=domainxml
+ while True:
+ ix=txml.find('slot=')
+ if ix < 0:
+ break
+ solt16=txml[ix +6:ix+10]
+ if "'" in solt16:
+ solt16=solt16.replace("'", "")
+ try:
+ solt10=int(solt16,16)
+ if solt10 > max_index:
+ max_index=solt10
+ except:
+ txml=txml[ix + 11:]
+ continue
+ txml=txml[ix + 11:]
+ return hex(max_index + 1)
+
+ def surfs_file_volume(dxml,dm_xml):
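+ # for file volumes on surfs_storage, rebuild the disk XML element using the next free
+ # PCI slot taken from the current domain XML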
+ if 'surfs_storage' not in dxml:
+ return dxml
+ oml=dxml
+ soltstr=get_solt_index(dm_xml)
+ if soltstr is None:
+ return oml
+ else:
+ oml=''
+ oml = oml + ''
+ oml = oml + ''
+ oml = oml + ''
+ oml = oml + ''
+ oml = oml + ''
+ oml = oml + ''
+ return oml
+
+
if volume.deviceType == 'iscsi':
xml = iscsibased_volume()
elif volume.deviceType == 'file':
xml = filebased_volume()
+ xml = surfs_file_volume(xml, self.domain_xml)
elif volume.deviceType == 'ceph':
xml = ceph_volume()
elif volume.deviceType == 'fusionstor':
diff --git a/surfsbackupstorage/__init__.py b/surfsbackupstorage/__init__.py
new file mode 100755
index 0000000000..1c759d854f
--- /dev/null
+++ b/surfsbackupstorage/__init__.py
@@ -0,0 +1 @@
+__author__ = 'frank'
diff --git a/surfsbackupstorage/ansible/surfsb.py b/surfsbackupstorage/ansible/surfsb.py
new file mode 100644
index 0000000000..e6ce1b568c
--- /dev/null
+++ b/surfsbackupstorage/ansible/surfsb.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+# encoding: utf-8
+import argparse
+from zstacklib import *
+import os
+
+# create log
+logger_dir = "/var/log/zstack/"
+create_log(logger_dir)
+banner("Starting to deploy surfs backup agent")
+start_time = datetime.now()
+# set default value
+file_root = "files/surfsb"
+pip_url = "https=//pypi.python.org/simple/"
+proxy = ""
+sproxy = ""
+zstack_repo = 'false'
+post_url = ""
+pkg_surfsbagent = ""
+virtualenv_version = "12.1.1"
+remote_user = "root"
+remote_pass = None
+remote_port = None
+
+
+# get parameter from shell
+parser = argparse.ArgumentParser(description='Deploy surfs backup storage to host')
+parser.add_argument('-i', type=str, help="""specify inventory host file
+ default=/etc/ansible/hosts""")
+parser.add_argument('--private-key', type=str, help='use this file to authenticate the connection')
+parser.add_argument('-e', type=str, help='set additional variables as key=value or YAML/JSON')
+
+args = parser.parse_args()
+argument_dict = eval(args.e)
+
+# update the variable from shell arguments
+locals().update(argument_dict)
+virtenv_path = "%s/virtualenv/surfsb/" % zstack_root
+surfsb_root = "%s/surfsb/package" % zstack_root
+host_post_info = HostPostInfo()
+host_post_info.host_inventory = args.i
+host_post_info.host = host
+host_post_info.post_url = post_url
+host_post_info.private_key = args.private_key
+host_post_info.remote_user = remote_user
+host_post_info.remote_pass = remote_pass
+host_post_info.remote_port = remote_port
+if remote_pass is not None and remote_user != 'root':
+ host_post_info.become = True
+
+# include zstacklib.py
+(distro, distro_version, distro_release) = get_remote_host_info(host_post_info)
+zstacklib_args = ZstackLibArgs()
+zstacklib_args.distro = distro
+zstacklib_args.distro_release = distro_release
+zstacklib_args.distro_version = distro_version
+zstacklib_args.zstack_repo = zstack_repo
+zstacklib_args.yum_server = yum_server
+zstacklib_args.zstack_root = zstack_root
+zstacklib_args.host_post_info = host_post_info
+zstacklib_args.pip_url = pip_url
+zstacklib_args.trusted_host = trusted_host
+zstacklib = ZstackLib(zstacklib_args)
+
+# name: judge whether this is an initial install or an upgrade
+if file_dir_exist("path=" + surfsb_root, host_post_info):
+ init_install = False
+else:
+ init_install = True
+ # name: create root directories
+ command = 'mkdir -p %s %s' % (surfsb_root, virtenv_path)
+ run_remote_command(command, host_post_info)
+
+run_remote_command("rm -rf %s/*" % surfsb_root, host_post_info)
+# name: install virtualenv
+virtual_env_status = check_and_install_virtual_env(virtualenv_version, trusted_host, pip_url, host_post_info)
+if virtual_env_status is False:
+ command = "rm -rf %s && rm -rf %s" % (virtenv_path, surfsb_root)
+ run_remote_command(command, host_post_info)
+ sys.exit(1)
+# name: make sure virtualenv has been setup
+# here we keep "rm -rf virtualenv" instead of only checking "-f %s/bin/python" until zstack 2.0, due to a bug
+# in old versions; we need to make sure all users upgrade to the new version
+command = "rm -rf %s && virtualenv --system-site-packages %s " % (virtenv_path, virtenv_path)
+run_remote_command(command, host_post_info)
+
+if distro == "RedHat" or distro == "CentOS":
+ if zstack_repo != 'false':
+ command = ("pkg_list=`rpm -q wget qemu-img-ev libvirt libguestfs-winsupport libguestfs-tools | grep \"not installed\" | awk '{ print $2 }'` && for pkg"
+ " in $pkg_list; do yum --disablerepo=* --enablerepo=%s install -y $pkg; done;") % zstack_repo
+ run_remote_command(command, host_post_info)
+ if distro_version >= 7:
+ command = "(which firewalld && service firewalld stop && chkconfig firewalld off) || true"
+ run_remote_command(command, host_post_info)
+ else:
+ for pkg in [ "wget", "qemu-img-ev", "libvirt", "libguestfs-winsupport", "libguestfs-tools"]:
+ yum_install_package(pkg, host_post_info)
+ if distro_version >= 7:
+ command = "(which firewalld && service firewalld stop && chkconfig firewalld off) || true"
+ run_remote_command(command, host_post_info)
+ set_selinux("state=disabled", host_post_info)
+
+elif distro == "Debian" or distro == "Ubuntu":
+ install_pkg_list = ["wget", "qemu-utils", "libvirt-bin", "libguestfs-tools"]
+ apt_install_packages(install_pkg_list, host_post_info)
+ command = "(chmod 0644 /boot/vmlinuz*) || true"
+ run_remote_command(command, host_post_info)
+else:
+ error("unsupported OS!")
+
+# name: copy zstacklib
+copy_arg = CopyArg()
+copy_arg.src = "files/zstacklib/%s" % pkg_zstacklib
+copy_arg.dest = "%s/%s" % (surfsb_root, pkg_zstacklib)
+copy_zstacklib = copy(copy_arg, host_post_info)
+
+if copy_zstacklib != "changed:False":
+ agent_install_arg = AgentInstallArg(trusted_host, pip_url, virtenv_path, init_install)
+ agent_install_arg.agent_name = "zstacklib"
+ agent_install_arg.agent_root = surfsb_root
+ agent_install_arg.pkg_name = pkg_zstacklib
+ agent_install(agent_install_arg, host_post_info)
+
+# name: copy surfs backupstorage agent
+copy_arg = CopyArg()
+copy_arg.src = "%s/%s" % (file_root, pkg_surfsbagent)
+copy_arg.dest = "%s/%s" % (surfsb_root, pkg_surfsbagent)
+copy_surfsb = copy(copy_arg, host_post_info)
+
+if copy_surfsb != "changed:False":
+ agent_install_arg = AgentInstallArg(trusted_host, pip_url, virtenv_path, init_install)
+ agent_install_arg.agent_name = "surfsbackup"
+ agent_install_arg.agent_root = surfsb_root
+ agent_install_arg.pkg_name = pkg_surfsbagent
+ agent_install(agent_install_arg, host_post_info)
+
+# name: copy service file
+# only CentOS, RedHat, Debian and Ubuntu are supported
+copy_arg = CopyArg()
+copy_arg.src = "%s/zstack-surfs-backupstorage" % file_root
+copy_arg.dest = "/etc/init.d/"
+copy_arg.args = "mode=755"
+copy(copy_arg, host_post_info)
+
+
+# name: restart surfsbagent
+if distro == "RedHat" or distro == "CentOS":
+ command = "service zstack-surfs-backupstorage stop && service zstack-surfs-backupstorage start && chkconfig zstack-surfs-backupstorage on"
+elif distro == "Debian" or distro == "Ubuntu":
+ command = "update-rc.d zstack-surfs-backupstorage start 97 3 4 5 . stop 3 0 1 2 6 . && service zstack-surfs-backupstorage stop && service zstack-surfs-backupstorage start"
+run_remote_command(command, host_post_info)
+# change surfs config
+
+
+host_post_info.start_time = start_time
+handle_ansible_info("SUCC: Deploy surfsbackup agent successful", host_post_info, "INFO")
+
+sys.exit(0)
diff --git a/surfsbackupstorage/ansible/surfsb.yaml b/surfsbackupstorage/ansible/surfsb.yaml
new file mode 100644
index 0000000000..adc67dc480
--- /dev/null
+++ b/surfsbackupstorage/ansible/surfsb.yaml
@@ -0,0 +1,94 @@
+---
+
+- hosts: "{{host}}"
+ vars:
+ - virtenv_path: "{{zstack_root}}/virtualenv/surfsb/"
+ - surfsb_root: "{{zstack_root}}/surfsb"
+ - file_root: "files/surfsb"
+ - pip_url: "{{pypi_url|default('https://pypi.python.org/simple/')}}"
+ - proxy: "{{http_proxy|default()}}"
+ - sproxy: "{{https_proxy|default()}}"
+ - yum_repos: "{{yum_repo|default('false')}}"
+
+ tasks:
+ - include: zstacklib.yaml
+
+ - name: create root directories
+ shell: "mkdir -p {{item}}"
+ with_items:
+ - "{{surfsb_root}}"
+ - "{{virtenv_path}}"
+
+ - name: install dependent packages on RedHat based OS from local
+ when: ansible_os_family == 'RedHat' and yum_repos != 'false'
+ shell: "yum --disablerepo=* --enablerepo={{yum_repos}} --nogpgcheck install -y wget qemu-img"
+
+ - name: install dependent packages on RedHat based OS from online
+ when: ansible_os_family == 'RedHat' and yum_repos == 'false'
+ shell: "yum --nogpgcheck install -y wget qemu-img"
+
+ - name: install dependent packages on Debian based OS
+ when: ansible_os_family == 'Debian'
+ apt: pkg="{{item}}"
+ with_items:
+ - wget
+ - qemu-utils
+
+ - name: RHEL7 specific packages from user defined repos
+ when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repos != 'false'
+ shell: "rpm -q iptables-services || yum --disablerepo=* --enablerepo={{yum_repos}} --nogpgcheck install -y iptables-services "
+
+ - name: RHEL7 specific packages from online
+ when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repos == 'false'
+ shell: "rpm -q iptables-services || yum --nogpgcheck install -y iptables-services"
+
+ - name: disable firewalld in RHEL7 and Centos7
+ when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7'
+ shell: "(which firewalld && service firewalld stop && chkconfig firewalld off) || true"
+
+ - name: disable selinux on RedHat based OS
+ when: ansible_os_family == 'RedHat'
+ selinux: state=permissive policy=targeted
+
+ - shell: virtualenv --version | grep "12.1.1"
+ register: virtualenv_ret
+ ignore_errors: True
+
+ - name: install virtualenv
+ pip: name=virtualenv version=12.1.1 extra_args="--ignore-installed --trusted-host {{trusted_host}} -i {{pip_url}}"
+ when: virtualenv_ret.rc != 0
+
+ - name: create virtualenv
+ shell: "rm -rf {{virtenv_path}} && rm -rf {{surfsb_root}}/{{pkg_zstacklib}} && rm -f {{surfsb_root}}/{{pkg_surfsbagent}} && virtualenv --system-site-packages {{virtenv_path}}"
+
+ - name: copy zstacklib
+ copy: src="files/zstacklib/{{pkg_zstacklib}}" dest="{{surfsb_root}}/{{pkg_zstacklib}}"
+ notify:
+ - install zstacklib
+
+ - name: copy surfs backupstorage agent
+ copy: src="{{file_root}}/{{pkg_surfsbagent}}" dest="{{surfsb_root}}/{{pkg_surfsbagent}}"
+ notify:
+ - install surfsbagent
+
+ - name: copy service file
+ when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian'
+ copy: src=files/surfsb/zstack-surfs-backupstorage dest=/etc/init.d/ mode=755
+
+ - meta: flush_handlers
+
+ - name: restart surfsbagent
+ service: name=zstack-surfs-backupstorage state=restarted enabled=yes
+
+ handlers:
+ - name: install zstacklib
+ environment:
+ http_proxy: "{{proxy}}"
+ https_proxy: "{{sproxy}}"
+ pip: name="{{surfsb_root}}/{{pkg_zstacklib}}" extra_args="--ignore-installed --trusted-host {{trusted_host}} -i {{pip_url}}" virtualenv="{{virtenv_path}}"
+
+ - name: install surfsbagent
+ environment:
+ http_proxy: "{{proxy}}"
+ https_proxy: "{{sproxy}}"
+ pip: name="{{surfsb_root}}/{{pkg_surfsbagent}}" extra_args="--ignore-installed --trusted-host {{trusted_host}} -i {{pip_url}}" virtualenv="{{virtenv_path}}"
diff --git a/surfsbackupstorage/setup.cfg b/surfsbackupstorage/setup.cfg
new file mode 100644
index 0000000000..861a9f5542
--- /dev/null
+++ b/surfsbackupstorage/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/surfsbackupstorage/setup.py b/surfsbackupstorage/setup.py
new file mode 100644
index 0000000000..bc79824af7
--- /dev/null
+++ b/surfsbackupstorage/setup.py
@@ -0,0 +1,26 @@
+from setuptools import setup, find_packages
+import sys, os
+
+version = '2.2.0'
+
+setup(name='surfsbackupstorage',
+ version=version,
+ description="ZStack SURFS backup storage agent",
+ long_description="""\
+ZStack SURFS backup storage agent""",
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ keywords='surfs zstack',
+ author='zhouhaiping',
+ author_email='zhouhaiping@sursen.net',
+ url='http://zstack.org',
+ license='Apache License 2',
+ packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
+ include_package_data=True,
+ zip_safe=True,
+ install_requires=[
+ # -*- Extra requirements: -*-
+ ],
+ entry_points="""
+ # -*- Entry points: -*-
+ """,
+ )
diff --git a/surfsbackupstorage/surfsbackupstorage/__init__.py b/surfsbackupstorage/surfsbackupstorage/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/surfsbackupstorage/surfsbackupstorage/cdaemon.py b/surfsbackupstorage/surfsbackupstorage/cdaemon.py
new file mode 100755
index 0000000000..bd0bf5420d
--- /dev/null
+++ b/surfsbackupstorage/surfsbackupstorage/cdaemon.py
@@ -0,0 +1,52 @@
+'''
+
+@author: zhou
+'''
+import sys, os, os.path
+import surfsagent
+from zstacklib.utils import log
+from zstacklib.utils import linux
+import zstacklib.utils.iptables as iptables
+
+pidfile = '/var/run/zstack/surfs-backupstorage.pid'
+log.configure_log('/var/log/zstack/surfs-backupstorage.log')
+logger = log.get_logger(__name__)
+
+def prepare_pid_dir(path):
+ pdir = os.path.dirname(path)
+ if not os.path.isdir(pdir):
+ os.makedirs(pdir)
+
+def main():
+ usage = 'usage: python -c "from surfsbackupstorage import cdaemon; cdaemon.main()" start|stop|restart'
+ if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
+ print usage
+ sys.exit(1)
+
+ global pidfile
+ prepare_pid_dir(pidfile)
+
+ try:
+ iptc = iptables.from_iptables_save()
+ iptc.add_rule('-A INPUT -p tcp -m tcp --dport 6732 -j ACCEPT')
+ iptc.iptable_restore()
+
+ cmd = sys.argv[1]
+ agentdaemon = surfsagent.SurfsDaemon(pidfile)
+ if cmd == 'start':
+ logger.debug('zstack-surfs-backupstorage starts')
+ agentdaemon.start()
+ elif cmd == 'stop':
+ logger.debug('zstack-surfs-backupstorage stops')
+ agentdaemon.stop()
+ elif cmd == 'restart':
+ logger.debug('zstack-surfs-backupstorage restarts')
+ agentdaemon.restart()
+ sys.exit(0)
+ except Exception:
+ logger.warning(linux.get_exception_stacktrace())
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/surfsbackupstorage/surfsbackupstorage/surfsagent.py b/surfsbackupstorage/surfsbackupstorage/surfsagent.py
new file mode 100755
index 0000000000..b32d352311
--- /dev/null
+++ b/surfsbackupstorage/surfsbackupstorage/surfsagent.py
@@ -0,0 +1,316 @@
+__author__ = 'zhouhaiping'
+
+import zstacklib.utils.daemon as daemon
+import zstacklib.utils.http as http
+import zstacklib.utils.log as log
+import zstacklib.utils.shell as shell
+import zstacklib.utils.lichbd as lichbd
+import zstacklib.utils.iptables as iptables
+import zstacklib.utils.jsonobject as jsonobject
+import zstacklib.utils.lock as lock
+import zstacklib.utils.linux as linux
+import zstacklib.utils.sizeunit as sizeunit
+import zstacklib.utils.lichbd_factory as lichbdfactory
+from zstacklib.utils import plugin
+from zstacklib.utils.rollback import rollback, rollbackable
+import os
+import os.path
+import errno
+import functools
+import traceback
+import pprint
+import threading
+import commands
+import json
+import time
+
+logger = log.get_logger(__name__)
+class SurfsCmdManage(object):
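+ # thin wrapper around the 'surfs' CLI: each method shells out via commands.getstatusoutput
+ # and parses the JSON reply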
+ def __init__(self):
+ pass
+ def get_pool_msg(self):
+ cmdstr='surfs connect'
+ i=0
+ while i < 5:
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret == 0:
+
+ rmsg=json.loads(rslt)
+ if rmsg["success"] is False:
+ i += 1
+ time.sleep(1)
+ continue
+ else:
+ return rmsg["data"]
+ return None
+
+ def download_image_to_surfs(self,url,image_uuid,image_fmt):
+ cmdstr='surfs image-add %s %s %s'%(url,image_fmt,image_uuid)
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ return True
+
+ def get_iamge_size(self,imageid):
+ cmdstr='surfs image-info ' + imageid
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ size=rmsg['data']['size']
+ return size
+
+ def delete_image(self,imageuuid):
+ cmdstr='surfs image-del %s'%imageuuid
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.warn(rslt)
+ return False
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ logger.warn(rslt)
+ return False
+ return True
+
+class AgentResponse(object):
+ def __init__(self, success=True, error=None):
+ self.success = success
+ self.error = error if error else ''
+ self.totalCapacity = None
+ self.availableCapacity = None
+
+class InitRsp(AgentResponse):
+ def __init__(self):
+ super(InitRsp, self).__init__()
+ self.fsid = None
+
+class DownloadRsp(AgentResponse):
+ def __init__(self):
+ super(DownloadRsp, self).__init__()
+ self.size = None
+ self.actualSize = None
+
+class GetImageSizeRsp(AgentResponse):
+ def __init__(self):
+ super(GetImageSizeRsp, self).__init__()
+ self.size = None
+ self.actualSize = None
+
+class PingRsp(AgentResponse):
+ def __init__(self):
+ super(PingRsp, self).__init__()
+ self.operationFailure = False
+
+class GetFactsRsp(AgentResponse):
+ def __init__(self):
+ super(GetFactsRsp, self).__init__()
+ self.fsid = None
+
+class GetLocalFileSizeRsp(AgentResponse):
+ def __init__(self):
+ super(GetLocalFileSizeRsp, self).__init__()
+ self.size = None
+
+def replyerror(func):
+ @functools.wraps(func)
+ def wrap(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception as e:
+ content = traceback.format_exc()
+ err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
+ rsp = AgentResponse()
+ rsp.success = False
+ rsp.error = str(e)
+ logger.warn(err)
+ return jsonobject.dumps(rsp)
+ return wrap
+
+class SurfsAgent(object):
+ INIT_PATH = "/surfs/backupstorage/init"
+ DOWNLOAD_IMAGE_PATH = "/surfs/backupstorage/image/download"
+ DELETE_IMAGE_PATH = "/surfs/backupstorage/image/delete"
+ PING_PATH = "/surfs/backupstorage/ping"
+ ECHO_PATH = "/surfs/backupstorage/echo"
+ GET_IMAGE_SIZE_PATH = "/surfs/backupstorage/image/getsize"
+ GET_FACTS = "/surfs/backupstorage/facts"
+ GET_LOCAL_FILE_SIZE = "/surfs/backupstorage/getlocalfilesize"
+
+
+ http_server = http.HttpServer(port=6732)
+ http_server.logfile_path = log.get_logfile_path()
+
+ def __init__(self):
+ self.http_server.register_async_uri(self.INIT_PATH, self.init)
+ self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
+ self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
+ self.http_server.register_async_uri(self.PING_PATH, self.ping)
+ self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
+ self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
+ self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
+ self.http_server.register_async_uri(self.GET_LOCAL_FILE_SIZE, self.get_local_file_size)
+ self.fsid='surfsc48-2cef-454c-b0d0-b6e6b467c022'
+ self.tmp_image_path='/usr/lib/surfstmpimages'
+ if os.path.exists(self.tmp_image_path) is False:
+ shell.call('mkdir %s'%self.tmp_image_path)
+ self.surfs_mgr = SurfsCmdManage()
+
+ def _normalize_install_path(self, path):
+ return path.lstrip('surfs:').lstrip('//')
+
+ def _set_capacity_to_response(self, rsp):
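+ # aggregate total and used capacity over all healthy pools reported by 'surfs connect'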
+ cmdstr='surfs connect'
+ total = 0
+ used = 0
+ rmsg=self.surfs_mgr.get_pool_msg()
+ for pl in rmsg:
+ if pl["success"] is True:
+ total=total + pl["total"]
+ used=used + pl["used"]
+ rsp.totalCapacity = total
+ rsp.availableCapacity = total - used
+
+ def _parse_install_path(self, path):
+ return path.lstrip('surfs:').lstrip('//').split('/')
+
+ @replyerror
+ def echo(self, req):
+ logger.debug('get echoed')
+ return ''
+
+ @replyerror
+ def get_facts(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp = GetFactsRsp()
+ rsp.fsid = self.fsid
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def init(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp = InitRsp()
+ rsp.fsid = self.fsid
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ @rollback
+ def download(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ pool, image_name = self._parse_install_path(cmd.installPath)
+ tmp_image_name = 'tmp-%s' % image_name
+
+ formated_file = os.path.join(self.tmp_image_path, image_name)
+ tmp_image_file = os.path.join(self.tmp_image_path, tmp_image_name)
+
+
+ if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
+ cmd.url = linux.shellquote(cmd.url)
+ actual_size = linux.get_file_size_by_http_head(cmd.url)
+ elif cmd.url.startswith('file://'):
+ src_path = cmd.url.lstrip('file:')
+ src_path = os.path.normpath(src_path)
+ if not os.path.isfile(src_path):
+ raise Exception('cannot find the file[%s]' % src_path)
+ actual_size = os.path.getsize(src_path)
+ else:
+ raise Exception('unknown url[%s]' % cmd.url)
+
+ file_format = ''
+ if "raw" in cmd.imageFormat:
+ file_format = 'raw'
+ if "qcow2" in cmd.imageFormat:
+ file_format = 'qcow2'
+ if file_format not in ['qcow2', 'raw']:
+ raise Exception('unknown image format: %s' % file_format)
+
+ if self.surfs_mgr.download_image_to_surfs(cmd.url, image_name,file_format) is False:
+ raise Exception('cannot download image from %s' % cmd.url)
+
+ size = self.surfs_mgr.get_iamge_size(image_name)
+ rsp = DownloadRsp()
+ rsp.size = size
+ rsp.actualSize = actual_size
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def ping(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp = PingRsp()
+
+ if cmd.testImagePath:
+ rmsg=self.surfs_mgr.get_pool_msg()
+ if rmsg is None:
+ rsp.success = False
+ rsp.operationFailure = True
+ rsp.error = "can not to do surfs connect"
+ logger.debug("%s" % rsp.error)
+ else:
+ if len(rmsg)> 0:
+ for rsg in rmsg:
+ if rsg['success'] is False:
+ rsp.success = False
+ rsp.operationFailure = True
+ rsp.error = "Surfs is ready,but pool is breaken"
+ logger.debug("Surfs is ready,but pool is breaken")
+ break
+ else:
+ rsp.success = False
+ rsp.operationFailure = True
+ rsp.error = "Surfs is ready,but pool is Null"
+ logger.debug("Surfs is ready,but pool is Null")
+
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def delete(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ pool, image_name = self._parse_install_path(cmd.installPath)
+ self.surfs_mgr.delete_image(image_name)
+
+ rsp = AgentResponse()
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def get_local_file_size(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp = GetLocalFileSizeRsp()
+ filedir=cmd.path[7:]
+ if os.path.exists(filedir):
+ rsp.size = linux.get_local_file_size(filedir)
+ else:
+ rsp.size=0
+ rsp.success=False
+ rsp.error ="The file is not exist"
+ return jsonobject.dumps(rsp)
+
+ def _get_file_size(self,pool,image_name):
+ return self.surfs_mgr.get_iamge_size(image_name)
+
+ @replyerror
+ def get_image_size(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp = GetImageSizeRsp()
+ pool, image_name = self._parse_install_path(cmd.installPath)
+ rsp.size = self._get_file_size(pool,image_name)
+ return jsonobject.dumps(rsp)
+
+
+
+class SurfsDaemon(daemon.Daemon):
+ def __init__(self, pidfile):
+ super(SurfsDaemon, self).__init__(pidfile)
+
+ def run(self):
+ self.agent = SurfsAgent()
+ self.agent.http_server.start()
+
+
+
diff --git a/surfsbackupstorage/zstack-surfs-backupstorage b/surfsbackupstorage/zstack-surfs-backupstorage
new file mode 100755
index 0000000000..a81cc19b9f
--- /dev/null
+++ b/surfsbackupstorage/zstack-surfs-backupstorage
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+# the following is chkconfig init header
+#
+# zstack-surfs-backupstorage: zstack surfs backup storage agent daemon
+#
+# chkconfig: 345 97 03
+# description: This is a daemon instructed by zstack management server \
+# to perform backup storage related operations\
+# See http://zstack.org
+#
+# processname: zstack-surfs-backupstorage
+# pidfile: /var/run/zstack/surfs-backupstorage.pid
+#
+
+check_status() {
+ pidfile='/var/run/zstack/surfs-backupstorage.pid'
+ if [ ! -f $pidfile ]; then
+ echo "zstack surfs-backupstorage agent is stopped"
+ exit 1
+ else
+ pid=`cat $pidfile`
+ ps -p $pid > /dev/null
+ if [ $? -eq 0 ]; then
+ echo "zstack surfs-backupstorage agent is running, pid is $pid"
+ exit 0
+ else
+ echo "zstack surfs-backupstorage is stopped, but pidfile at $pidfile is not cleaned. It may be caused by the agent crashed at last time, manually cleaning it would be ok"
+ exit 1
+ fi
+ fi
+}
+
+if [ $# -eq 0 ]; then
+ echo "usage: $0
+[start|stop|restart|status]"
+ exit 1
+fi
+
+if [ "$@" = "status" ]; then
+ check_status
+else
+ . /var/lib/zstack/virtualenv/surfsb/bin/activate && python -c "from surfsbackupstorage import cdaemon; cdaemon.main()" $@
+fi
+
+if [ $? -eq 0 ]; then
+ echo "$@ zstack surfs-backupstorage agent .... SUCCESS"
+ exit 0
+else
+ echo "$@ zstack surfs-backupstorage agent .... FAILED"
+ exit 1
+fi
diff --git a/surfsprimarystorage/ansible/surfsp.py b/surfsprimarystorage/ansible/surfsp.py
new file mode 100644
index 0000000000..8553fd644c
--- /dev/null
+++ b/surfsprimarystorage/ansible/surfsp.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+# encoding: utf-8
+import argparse
+from zstacklib import *
+
+# create log
+logger_dir = "/var/log/zstack/"
+create_log(logger_dir)
+banner("Starting to deploy surfs primary agent")
+start_time = datetime.now()
+# set default value
+file_root = "files/surfsp"
+pip_url = "https=//pypi.python.org/simple/"
+proxy = ""
+sproxy = ""
+zstack_repo = 'false'
+post_url = ""
+pkg_surfspagent = ""
+virtualenv_version = "12.1.1"
+remote_user = "root"
+remote_pass = None
+remote_port = None
+
+
+# get parameter from shell
+parser = argparse.ArgumentParser(description='Deploy surfs primary storage to host')
+parser.add_argument('-i', type=str, help="""specify inventory host file
+ default=/etc/ansible/hosts""")
+parser.add_argument('--private-key', type=str, help='use this file to authenticate the connection')
+parser.add_argument('-e', type=str, help='set additional variables as key=value or YAML/JSON')
+
+args = parser.parse_args()
+argument_dict = eval(args.e)
+
+# update the variable from shell arguments
+locals().update(argument_dict)
+virtenv_path = "%s/virtualenv/surfsp/" % zstack_root
+surfsp_root = "%s/surfsp/package" % zstack_root
+host_post_info = HostPostInfo()
+host_post_info.host_inventory = args.i
+host_post_info.host = host
+host_post_info.post_url = post_url
+host_post_info.private_key = args.private_key
+host_post_info.remote_user = remote_user
+host_post_info.remote_pass = remote_pass
+host_post_info.remote_port = remote_port
+if remote_pass is not None and remote_user != 'root':
+ host_post_info.become = True
+
+# include zstacklib.py
+(distro, distro_version, distro_release) = get_remote_host_info(host_post_info)
+zstacklib_args = ZstackLibArgs()
+zstacklib_args.distro = distro
+zstacklib_args.distro_release = distro_release
+zstacklib_args.distro_version = distro_version
+zstacklib_args.zstack_repo = zstack_repo
+zstacklib_args.yum_server = yum_server
+zstacklib_args.zstack_root = zstack_root
+zstacklib_args.host_post_info = host_post_info
+zstacklib_args.pip_url = pip_url
+zstacklib_args.trusted_host = trusted_host
+zstacklib = ZstackLib(zstacklib_args)
+
+
+# name: judge whether this is an initial install or an upgrade
+if file_dir_exist("path=" + surfsp_root, host_post_info):
+ init_install = False
+else:
+ init_install = True
+ # name: create root directories
+ command = 'mkdir -p %s %s' % (surfsp_root, virtenv_path)
+ run_remote_command(command, host_post_info)
+
+run_remote_command("rm -rf %s/*" % surfsp_root, host_post_info)
+
+if distro == "RedHat" or distro == "CentOS":
+ if zstack_repo != 'false':
+ command = ("pkg_list=`rpm -q wget qemu-img-ev libvirt libguestfs-winsupport libguestfs-tools"
+ " | grep \"not installed\" | awk '{ print $2 }'` && for pkg"
+ " in $pkg_list; do yum --disablerepo=* --enablerepo=%s install -y $pkg; done;") % zstack_repo
+ run_remote_command(command, host_post_info)
+ if distro_version >= 7:
+ command = "(which firewalld && service firewalld stop && chkconfig firewalld off) || true"
+ run_remote_command(command, host_post_info)
+ else:
+ for pkg in ["wget", "qemu-img-ev", "libvirt", "libguestfs-winsupport", "libguestfs-tools"]:
+ yum_install_package(pkg, host_post_info)
+ if distro_version >= 7:
+ command = "(which firewalld && service firewalld stop && chkconfig firewalld off) || true"
+ run_remote_command(command, host_post_info)
+ set_selinux("state=disabled", host_post_info)
+ # name: enable libvirt daemon on RedHat based OS
+ service_status("libvirtd", "state=started enabled=yes", host_post_info)
+ if distro_version >= 7:
+ # name: enable virtlockd daemon on RedHat based OS
+ service_status("virtlockd", "state=started enabled=yes", host_post_info)
+ # name: copy sysconfig libvirtd conf in RedHat
+ copy_arg = CopyArg()
+ copy_arg.src = "%s/../kvm/libvirtd" % file_root
+ copy_arg.dest = "/etc/sysconfig/libvirtd"
+ libvirtd_status = copy(copy_arg, host_post_info)
+
+elif distro == "Debian" or distro == "Ubuntu":
+ install_pkg_list = ["wget", "qemu-utils","libvirt-bin", "libguestfs-tools"]
+ apt_install_packages(install_pkg_list, host_post_info)
+ command = "(chmod 0644 /boot/vmlinuz*) || true"
+ run_remote_command(command, host_post_info)
+ # name: copy default libvirtd conf in Debian
+ copy_arg = CopyArg()
+ copy_arg.src = "%s/../kvm/libvirt-bin" % file_root
+ copy_arg.dest = '/etc/default/libvirt-bin'
+ libvirt_bin_status = copy(copy_arg, host_post_info)
+ if libvirt_bin_status != "changed:False":
+ # name: restart debian libvirtd
+ service_status("libvirt-bin", "state=restarted enabled=yes", host_post_info)
+else:
+ error("unsupported OS!")
+
+# name: install virtualenv
+virtual_env_status = check_and_install_virtual_env(virtualenv_version, trusted_host, pip_url, host_post_info)
+if virtual_env_status is False:
+ command = "rm -rf %s && rm -rf %s" % (virtenv_path, surfsp_root)
+ run_remote_command(command, host_post_info)
+ sys.exit(1)
+
+# name: make sure virtualenv has been setup
+command = "[ -f %s/bin/python ] || virtualenv --system-site-packages %s " % (virtenv_path, virtenv_path)
+run_remote_command(command, host_post_info)
+
+# name: copy zstacklib and install
+copy_arg = CopyArg()
+copy_arg.src = "files/zstacklib/%s" % pkg_zstacklib
+copy_arg.dest = "%s/%s" % (surfsp_root, pkg_zstacklib)
+zstack_lib_copy = copy(copy_arg, host_post_info)
+if zstack_lib_copy != "changed:False":
+ agent_install_arg = AgentInstallArg(trusted_host, pip_url, virtenv_path, init_install)
+ agent_install_arg.agent_name = "zstacklib"
+ agent_install_arg.agent_root = surfsp_root
+ agent_install_arg.pkg_name = pkg_zstacklib
+ agent_install(agent_install_arg, host_post_info)
+
+# name: copy surfs primarystorage agent
+copy_arg = CopyArg()
+copy_arg.src = "%s/%s" % (file_root, pkg_surfspagent)
+copy_arg.dest = "%s/%s" % (surfsp_root, pkg_surfspagent)
+surfspagent_copy = copy(copy_arg, host_post_info)
+if surfspagent_copy != "changed:False":
+ agent_install_arg = AgentInstallArg(trusted_host, pip_url, virtenv_path, init_install)
+ agent_install_arg.agent_name = "surfs_primarystorage"
+ agent_install_arg.agent_root = surfsp_root
+ agent_install_arg.pkg_name = pkg_surfspagent
+ agent_install(agent_install_arg, host_post_info)
+
+# name: copy service file
+# only CentOS, RedHat, Debian and Ubuntu are supported
+copy_arg = CopyArg()
+copy_arg.src = "%s/zstack-surfs-primarystorage" % file_root
+copy_arg.dest = "/etc/init.d/"
+copy_arg.args = "mode=755"
+copy(copy_arg, host_post_info)
+# name: restart surfspagent
+if distro == "RedHat" or distro == "CentOS":
+ command = "service zstack-surfs-primarystorage stop && service zstack-surfs-primarystorage start" \
+ " && chkconfig zstack-surfs-primarystorage on"
+elif distro == "Debian" or distro == "Ubuntu":
+ command = "update-rc.d zstack-surfs-primarystorage start 97 3 4 5 . stop 3 0 1 2 6 . && service zstack-surfs-primarystorage stop && service zstack-surfs-primarystorage start"
+run_remote_command(command, host_post_info)
+# change surfs config
+set_ini_file("/etc/surfs/surfs.conf", 'global', "rbd_default_format", "2", host_post_info)
+
+# name: remove libvirt default bridge
+command = '(ifconfig virbr0 &> /dev/null && virsh net-destroy default > ' \
+ '/dev/null && virsh net-undefine default > /dev/null) || true'
+host_post_info.post_label = "ansible.shell.virsh.destroy.bridge"
+host_post_info.post_label_param = None
+run_remote_command(command, host_post_info)
+
+# name: copy libvirtd conf
+copy_arg = CopyArg()
+copy_arg.src = "%s/../kvm/libvirtd.conf" % file_root
+copy_arg.dest = "/etc/libvirt/libvirtd.conf"
+libvirtd_conf_status = copy(copy_arg, host_post_info)
+
+# name: copy qemu conf
+copy_arg = CopyArg()
+copy_arg.src = "%s/../kvm/qemu.conf" % file_root
+copy_arg.dest = "/etc/libvirt/qemu.conf"
+qemu_conf_status = copy(copy_arg, host_post_info)
+
+# name: delete A2 qemu hook
+command = "rm -f /etc/libvirt/hooks/qemu"
+host_post_info.post_label = "ansible.shell.remove.file"
+host_post_info.post_label_param = "/etc/libvirt/hooks/qemu"
+run_remote_command(command, host_post_info)
+
+# name: restart libvirt
+if distro == "RedHat" or distro == "CentOS":
+ if libvirtd_status != "changed:False" or libvirtd_conf_status != "changed:False" \
+ or qemu_conf_status != "changed:False":
+ # name: restart redhat libvirtd
+ service_status("libvirtd", "state=restarted enabled=yes", host_post_info)
+elif distro == "Debian" or distro == "Ubuntu":
+ if libvirt_bin_status != "changed:False" or libvirtd_conf_status != "changed:False" \
+ or qemu_conf_status != "changed:False":
+ # name: restart debian libvirtd
+ service_status("libvirt-bin", "state=restarted enabled=yes", host_post_info)
+
+
+host_post_info.start_time = start_time
+handle_ansible_info("SUCC: Deploy surfs primary agent successful", host_post_info, "INFO")
+
+sys.exit(0)
diff --git a/surfsprimarystorage/ansible/surfsp.yaml b/surfsprimarystorage/ansible/surfsp.yaml
new file mode 100644
index 0000000000..4a60409ff6
--- /dev/null
+++ b/surfsprimarystorage/ansible/surfsp.yaml
@@ -0,0 +1,94 @@
+---
+
+- hosts: "{{host}}"
+ vars:
+ - virtenv_path: "{{zstack_root}}/virtualenv/surfsp/"
+ - surfsp_root: "{{zstack_root}}/surfsp"
+ - file_root: "files/surfsp"
+ - pip_url: "{{pypi_url|default('https://pypi.python.org/simple/')}}"
+ - proxy: "{{http_proxy|default()}}"
+ - sproxy: "{{https_proxy|default()}}"
+ - yum_repos: "{{yum_repo|default('false')}}"
+
+ tasks:
+ - include: zstacklib.yaml
+
+ - name: create root directories
+ shell: "mkdir -p {{item}}"
+ with_items:
+ - "{{surfsp_root}}"
+ - "{{virtenv_path}}"
+
+ - name: install dependent packages on RedHat based OS from user defined repos
+ when: ansible_os_family == 'RedHat' and yum_repos != 'false'
+ shell: "yum --disablerepo=* --enablerepo={{yum_repos}} --nogpgcheck install -y wget qemu-img"
+
+ - name: install dependent packages on RedHat based OS from online
+ when: ansible_os_family == 'RedHat' and yum_repos == 'false'
+ shell: "yum --nogpgcheck install -y wget qemu-img"
+
+ - name: install dependent packages on Debian based OS
+ when: ansible_os_family == 'Debian'
+ apt: pkg="{{item}}"
+ with_items:
+ - wget
+ - qemu-utils
+
+ - name: RHEL7 specific packages from user defined repos
+ when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repos != 'false'
+ shell: "rpm -q iptables-services || yum --disablerepo=* --enablerepo={{yum_repos}} --nogpgcheck install -y iptables-services"
+
+ - name: RHEL7 specific packages from online
+ when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repos == 'false'
+ shell: "rpm -q iptables-services || yum --nogpgcheck install -y iptables-services"
+
+ - name: disable firewalld in RHEL7 and Centos7
+ when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7'
+ shell: "(which firewalld && service firewalld stop && chkconfig firewalld off) || true"
+
+ - name: disable selinux on RedHat based OS
+ when: ansible_os_family == 'RedHat'
+ selinux: state=permissive policy=targeted
+
+ - shell: virtualenv --version | grep "12.1.1"
+ register: virtualenv_ret
+ ignore_errors: True
+
+ - name: install virtualenv
+ pip: name=virtualenv version=12.1.1 extra_args="--ignore-installed --trusted-host {{trusted_host}} -i {{pip_url}}"
+ when: virtualenv_ret.rc != 0
+
+ - name: create virtualenv
+ shell: "rm -rf {{virtenv_path}} && rm -rf {{surfsp_root}}/{{pkg_zstacklib}} && rm -f {{surfsp_root}}/{{pkg_surfspagent}} && virtualenv --system-site-packages {{virtenv_path}}"
+
+ - name: copy zstacklib
+ copy: src="files/zstacklib/{{pkg_zstacklib}}" dest="{{surfsp_root}}/{{pkg_zstacklib}}"
+ notify:
+ - install zstacklib
+
+ - name: copy surfs primarystorage agent
+ copy: src="{{file_root}}/{{pkg_surfspagent}}" dest="{{surfsp_root}}/{{pkg_surfspagent}}"
+ notify:
+ - install surfspagent
+
+ - name: copy service file
+ when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian'
+ copy: src=files/surfsp/zstack-surfs-primarystorage dest=/etc/init.d/ mode=755
+
+ - meta: flush_handlers
+
+ - name: restart surfspagent
+ service: name=zstack-surfs-primarystorage state=restarted enabled=yes
+
+ handlers:
+ - name: install zstacklib
+ environment:
+ http_proxy: "{{proxy}}"
+ https_proxy: "{{sproxy}}"
+ pip: name="{{surfsp_root}}/{{pkg_zstacklib}}" extra_args="--ignore-installed --trusted-host {{trusted_host}} -i {{pip_url}}" virtualenv="{{virtenv_path}}"
+
+ - name: install surfspagent
+ environment:
+ http_proxy: "{{proxy}}"
+ https_proxy: "{{sproxy}}"
+ pip: name="{{surfsp_root}}/{{pkg_surfspagent}}" extra_args="--ignore-installed --trusted-host {{trusted_host}} -i {{pip_url}}" virtualenv="{{virtenv_path}}"
diff --git a/surfsprimarystorage/setup.cfg b/surfsprimarystorage/setup.cfg
new file mode 100644
index 0000000000..861a9f5542
--- /dev/null
+++ b/surfsprimarystorage/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/surfsprimarystorage/setup.py b/surfsprimarystorage/setup.py
new file mode 100644
index 0000000000..7ca726ee65
--- /dev/null
+++ b/surfsprimarystorage/setup.py
@@ -0,0 +1,26 @@
+from setuptools import setup, find_packages
+import sys, os
+
+version = '2.2.0'
+
+setup(name='surfsprimarystorage',
+ version=version,
+ description="ZStack surfs primary storage",
+ long_description="""\
+ZStack surfs primary storage""",
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ keywords='zstack surfs',
+ author='zhou',
+ author_email='zhouhaiping@sursen.net',
+ url='http://zstack.org',
+ license='Apache License 2',
+ packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
+ include_package_data=True,
+ zip_safe=True,
+ install_requires=[
+ # -*- Extra requirements: -*-
+ ],
+ entry_points="""
+ # -*- Entry points: -*-
+ """,
+ )
diff --git a/surfsprimarystorage/surfsprimarystorage/__init__.py b/surfsprimarystorage/surfsprimarystorage/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/surfsprimarystorage/surfsprimarystorage/cdaemon.py b/surfsprimarystorage/surfsprimarystorage/cdaemon.py
new file mode 100755
index 0000000000..e89423d115
--- /dev/null
+++ b/surfsprimarystorage/surfsprimarystorage/cdaemon.py
@@ -0,0 +1,52 @@
+'''
+
+@author: zhouhaiping
+'''
+import sys, os, os.path
+import surfsagent
+from zstacklib.utils import log
+from zstacklib.utils import linux
+import zstacklib.utils.iptables as iptables
+
+pidfile = '/var/run/zstack/surfs-primarystorage.pid'
+log.configure_log('/var/log/zstack/surfs-primarystorage.log')
+logger = log.get_logger(__name__)
+
+def prepare_pid_dir(path):
+ pdir = os.path.dirname(path)
+ if not os.path.isdir(pdir):
+ os.makedirs(pdir)
+
+def main():
+ usage = 'usage: python -c "from surfsprimarystorage import cdaemon; cdaemon.main()" start|stop|restart'
+ if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
+ print usage
+ sys.exit(1)
+
+ global pidfile
+ prepare_pid_dir(pidfile)
+
+ try:
+ iptc = iptables.from_iptables_save()
+ iptc.add_rule('-A INPUT -p tcp -m tcp --dport 6731 -j ACCEPT')
+ iptc.iptable_restore()
+
+ cmd = sys.argv[1]
+ agentdaemon = surfsagent.SurfsDaemon(pidfile)
+ if cmd == 'start':
+ logger.debug('zstack-surfs-primarystorage starts')
+ agentdaemon.start()
+ elif cmd == 'stop':
+ logger.debug('zstack-surfs-primarystorage stops')
+ agentdaemon.stop()
+ elif cmd == 'restart':
+ logger.debug('zstack-surfs-primarystorage restarts')
+ agentdaemon.restart()
+ sys.exit(0)
+ except Exception:
+ logger.warning(linux.get_exception_stacktrace())
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/surfsprimarystorage/surfsprimarystorage/surfsagent.py b/surfsprimarystorage/surfsprimarystorage/surfsagent.py
new file mode 100755
index 0000000000..195adab3c2
--- /dev/null
+++ b/surfsprimarystorage/surfsprimarystorage/surfsagent.py
@@ -0,0 +1,1279 @@
+__author__ = 'frank'
+
+import zstacklib.utils.daemon as daemon
+import zstacklib.utils.http as http
+import zstacklib.utils.log as log
+import zstacklib.utils.shell as shell
+import zstacklib.utils.lichbd as lichbd
+import zstacklib.utils.iptables as iptables
+import zstacklib.utils.jsonobject as jsonobject
+import zstacklib.utils.lock as lock
+import zstacklib.utils.linux as linux
+import zstacklib.utils.sizeunit as sizeunit
+from zstacklib.utils import plugin
+from zstacklib.utils.rollback import rollback, rollbackable
+import os
+import errno
+import functools
+import traceback
+import pprint
+import threading
+import commands
+import json
+import time
+logger = log.get_logger(__name__)
+
+class SurfsCmdManage(object):
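+ # wraps the 'surfs' CLI for volume, snapshot and iSCSI target management;
+ # the constructor checks that at least one surfs pool is reachable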
+ def __init__(self):
+ self.back_type='hdd'
+ self.iqn_name='iqn.2017-10.org.surfs:'
+ self.iscsi_port='3262'
+ self.target_path='/surfs_storage'
+ self.init_name=self._get_local_iscsi_initname()
+ pooldata=self.get_pool_msg()
+ psign = []
+ if pooldata is None:
+ logger.debug('Surfs is not ready')
+ return
+ else:
+ for xpool in pooldata:
+ if xpool['success'] is False:
+ logger.debug('Surfs is ready but pool %s is not ready'%xpool['pool'])
+ else:
+ psign.append(xpool['pool'])
+ if len(psign) == 0:
+ logger.debug('no pool is ready')
+ raise Exception('no surfs pool is ready')
+ logger.debug('Surfs start complete!!!')
+ #self._resume_local_target(psign)
+ #logger.debug('Local target reconnect complete!')
+
+ def _resume_local_target(self,pools):
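+ # walk the /surfs_storage/<pool>/<volume> links left from a previous run: drop links for volumes that no
+ # longer exist, re-export and re-login iSCSI targets, and re-create links when the serving node IP has changed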
+ if os.path.exists(self.target_path) is False:
+ return
+ storage_dir=os.listdir(self.target_path)
+ for xf in storage_dir:
+ xdir=os.path.join(self.target_path,xf)
+ if os.path.isfile(xdir):
+ continue
+ vol_ids=os.listdir(xdir)
+ for vl in vol_ids:
+ vol_dir=os.path.join(xdir,vl)
+ vol_pool_ip=self.get_volume_pool_ip(vl)
+ if vol_pool_ip is None:
+ self._clean_overdue_target(self.iqn_name,vl)
+ self._clean_overdue_link(vol_dir)
+ continue
+
+ expmsg=self.get_exprot_msg(vl)
+ if expmsg is None:
+ self._clean_overdue_target(self.iqn_name,vl)
+ self._clean_overdue_link(vol_dir)
+ continue
+
+ if self.init_name != expmsg['InitiatorName']:
+ self._clean_overdue_target(self.iqn_name,vl)
+ self._clean_overdue_link(vol_dir)
+ continue
+
+ newip=self._check_target(self.iqn_name, self.init_name,'a12345678:a12345678', vl,vol_dir)
+ if newip is None:
+ continue
+ oldip=self._get_target_ip(self.iqn_name + vl)
+
+ if oldip != newip:
+ self._clean_overdue_target(self.iqn_name,vl)
+ self._connect_root_target(vl, newip,self.iqn_name + vl,self.iscsi_port)
+ self._find_target_path(vl)
+ self._clean_overdue_link(vol_dir)
+ self._link_local_target(vol_dir, vl, newip)
+ else:
+ self._login_local_target(self.iqn_name + vl)
+
+ def _update_target_ip(self,target_name,newip):
+ cmdstr='iscsiadm -m node -T ' + target_name + ' -o update --name node.discovery_address --value=' + newip
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Failed to do :' + cmdstr + '[' + rslt + ']')
+
+ def _get_target_ip(self,target_name):
+ cmdstr='iscsiadm -m node -T ' + target_name + ' |grep node.discovery_address'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Failed to do :' + cmdstr + '[' + rslt + ']')
+ return None
+ if 'No records found' in rslt:
+ return None
+ return rslt.split('=')[1]
+
+ def _loginout_local_target(self,target_name):
+ cmdstr='iscsiadm -m node -T ' + target_name + ' -u'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Failed to do :' + cmdstr + '[' + rslt + ']')
+
+ def _login_local_target(self,target_name):
+ cmdstr='iscsiadm -m node -T ' + target_name + ' -l'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Failed to do :' + cmdstr + '[' + rslt + ']')
+
+ def _link_local_target(self,vol_dir,volname,newip):
+ iscsipath='/dev/disk/by-path/ip-' + newip + ':' + self.iscsi_port + '-iscsi-' + self.iqn_name + volname + '-lun-0'
+ cmdstr='ln -s ' + iscsipath + ' ' + vol_dir
+ ret,rslt=commands.getstatusoutput(cmdstr)
+
+ def _clean_overdue_target(self,iqnname,volname):
+ self._loginout_local_target(iqnname + volname)
+ cmdstr='iscsiadm -m node -T ' + iqnname + volname + ' -o delete'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Failed to do :' + cmdstr + '[' + rslt + ']')
+
+ def _clean_overdue_link(self,overdir):
+ cmdstr='rm -f ' + overdir
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Failed to do :' + cmdstr + '[' + rslt + ']')
+
+ def _check_target(self,iqn_name,initor_name,chap_info,volname,linkdir):
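+ # run 'surfs check_export' for this volume; returns the serving node IP, or None
+ # (after removing the stale link) when the volume no longer exists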
+ cmdstr='surfs check_export ' + iqn_name + volname + ' ' + initor_name + ' ' + chap_info + ' ' + volname
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:[%s] for [%s]'%(rslt,cmdstr))
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ if 'volume not find' in rmsg['message']:
+ self._clean_overdue_link(linkdir)
+ return None
+ return rmsg['data']['ip']
+
+ def get_pool_msg(self):
+ cmdstr='surfs connect'
+ i=0
+ while i < 5:
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret == 0:
+ rmsg=json.loads(rslt)
+ if rmsg["success"] is False:
+ i += 1
+ time.sleep(1)
+ continue
+ else:
+ return rmsg["data"]
+ return None
+
+ def get_volume_pool_ip(self,volname):
+ cmdstr= 'surfs volume ' + volname
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ return None
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ return None
+
+ return rmsg["data"]["ip"]
+
+ def download_image_to_surfs(self,url,image_uuid,image_fmt):
+ cmdstr='surfs image-add %s %s %s'%(url,image_fmt,image_uuid)
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ return True
+
+ def get_iamge_size(self,imageid):
+ cmdstr='surfs image-info ' + imageid
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ size=rmsg['data']['size']
+ return size
+
+ def delete_image(self,imageuuid):
+ cmdstr='surfs image-del %s'%imageuuid
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ return True
+
+ def get_storage_ip(self,mg_ip):
+ return mg_ip
+
+ def create_data_volume(self,vol_id,sizestr,typestr,ipstr=None):
+ cmdstr=''
+ if ipstr is None:
+ cmdstr='surfs create -T %s -V %s %s'%(typestr,sizestr,vol_id)
+ else:
+ cmdstr='surfs create -T %s -V %s -P %s %s'%(typestr,sizestr,ipstr,vol_id)
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ return True
+
+ def clone_vol(self,src_id,dst_id):
+ cmdstr='surfs --pretty copy %s %s'%(src_id,dst_id)
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ return True
+
+ def create_vol_from_snap(self,snap_id,vol_id):
+ s_msg=self.get_snap_info(snap_id)
+ vol_size=s_msg['data']['srcsize']
+ size_G=sizeunit.Byte.toGigaByte(vol_size) + 1
+ cmdstr='surfs snap_to_volume ' + snap_id + ' ' + vol_id + ' ' + str(size_G)
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ return True
+
+ def get_vol_size(self,vol_id):
+ cmdstr='surfs --pretty volume %s'%vol_id
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+
+ return rmsg['data']['size']
+
+ def _get_pool_list(self):
+ cmdstr='surfs info'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+
+ return rmsg['data']['pools']
+
+ def get_vol_used_size(self,vol_id):
+ return '0'
+
+ def get_snap_info(self,snap_id):
+ cmdstr='surfs snapinfo ' + snap_id
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ return rmsg
+
+ def get_snap_exist_byvol(self,vol_id):
+ poollist=self._get_pool_list()
+ s_list=[]
+ tstr=''
+ for s in poollist:
+ if s['connected'] is False:
+ continue
+
+ cmdstr='surfs snaplist ' + s['pool']
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ for x in rmsg['data']:
+ if x['volume']== vol_id:
+ if str(x['ctime']) in tstr:
+ continue
+ else:
+ s_list.append(x)
+ tstr=tstr + ',' + str(x['ctime'])
+
+ return s_list
+
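+ # Return the snapshots of vol_id taken after snap_id; used to refuse a
+ # rollback while newer snapshots still exist.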
+ def get_after_snaps(self,vol_id,snap_id):
+ snaps=self.get_snap_exist_byvol(vol_id)
+ snap=self.get_snap_info(snap_id)
+ afters=[]
+ for x in snaps:
+ if x['ctime'] > snap['data']['ctime']:
+ afters.append(x)
+ return afters
+
+
+ def create_snapshot(self,vol_id,snap_id):
+ cmdstr='surfs snap ' + vol_id + ' ' + snap_id
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+
+ def delete_snapshot(self,snap_id):
+ cmdstr = 'surfs delete ' + snap_id
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.warn("Failed to delete volume or snapshot[%s]"%vol_id)
+ return False
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ if 'not found' in rmsg['message']:
+ logger.warn(rmsg['message'])
+ return False
+ else:
+ logger.warn("Failed to delete volume or snapshot[%s]"%vol_id)
+ return False
+ return True
+
+ def rollback_from_snap(self,snap_id,vol_id):
+ cmdstr='surfs snaprollback ' + vol_id + ' ' + snap_id
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+
+ def clone_image(self,image_id,vol_id):
+ cmdstr='surfs image-clone ' + image_id + ' ' + vol_id
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+
+ def delete_volume(self,vol_id):
+ self.disexport_volume(vol_id)
+ cmdstr = 'surfs delete ' + vol_id
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.warn("Failed to delete volume or snapshot[%s]"%vol_id)
+ return False
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ logger.warn("Failed to delete volume or snapshot[%s]"%vol_id)
+ return False
+ return True
+
+ def disexport_volume(self,vol_id):
+ cmdstr = 'surfs disexport ' + vol_id
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ return False
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ return False
+ return True
+
+ def export_target(self,initname,vol_name):
+ cmdstr='surfs export ' + self.iqn_name + vol_name + ' ' + initname + ' a12345678:a12345678 ' + vol_name
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+
+ return rmsg['data']['ip']
+
+ def _get_local_iscsi_initname(self):
+ ret,result=commands.getstatusoutput('cat /etc/iscsi/initiatorname.iscsi')
+ if ret >0:
+ return None
+ if 'InitiatorName' in result and '=' in result:
+ return result.split('=')[1].strip()
+ else:
+ return None
+
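+ # Export a root volume as an iSCSI target, log in to it from this host
+ # and symlink the resulting /dev/disk/by-path device under the local
+ # target path so the VM can open it like a local device.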
+ def export_root_target(self,initname,vol_name,poolname):
+ cmdstr='surfs export ' + self.iqn_name + vol_name + ' ' + initname + ' a12345678:a12345678 ' + vol_name
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ raise Exception('Error:%s'%rslt)
+ if os.path.exists(self.target_path) is False:
+ commands.getstatusoutput('mkdir ' + self.target_path)
+ ppath=os.path.join(self.target_path,poolname)
+ if os.path.exists(ppath) is False:
+ commands.getstatusoutput('mkdir ' + ppath)
+ iscsi_name=self.iqn_name + vol_name
+ self._connect_root_target(vol_name, rmsg['data']['ip'], iscsi_name, self.iscsi_port)
+ self._link_target_to_rootpath(ppath, vol_name,rmsg['data']['ip'])
+
+ return rmsg['data']['ip']
+
+ def _find_target_path(self,volname):
+ cmdstr='find /dev -type l -print0 | xargs --null file | grep -e ' + self.iqn_name + volname
+ k=0
+ while True:
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ k=k + 1
+ if k < 5:
+ time.sleep(0.3)
+ continue
+ else:
+ raise Exception('Error:%s'%rslt)
+ return rslt
+
+ def _link_target_to_rootpath(self,rootpath,volname,poolip):
+ linkstrs=self._find_target_path(volname)
+ if len(linkstrs) ==0:
+ return
+ lun=self._parse_link_paths(linkstrs)
+ iscsipath='/dev/disk/by-path/ip-' + poolip + ':' + self.iscsi_port + '-iscsi-' + self.iqn_name + volname + '-lun-' + lun
+ cmdstr=''
+ if volname in rootpath:
+ cmdstr='ln -s ' + iscsipath + ' ' + rootpath
+ else:
+ cmdstr='ln -s ' + iscsipath + ' ' + rootpath + '/' + volname
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+
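+ # Run iscsiadm sendtargets discovery against the pool IP, retrying up to
+ # five times until the volume's target shows up; raises if it never does.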
+ def _discovery_targets(self,st_ip,volname):
+ cmdstr='iscsiadm -m discovery -t sendtargets -p ' + st_ip + ':' + self.iscsi_port
+ k=0
+ while k < 5:
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ k = k +1
+ time.sleep(0.3)
+ logger.debug('warning:%s'%rslt)
+ continue
+ else:
+ if volname in rslt:
+ break
+ else:
+ k= k + 1
+ time.sleep(0.3)
+ logger.debug('warning:%s'%rslt)
+ continue
+ if k==5:
+ raise Exception('Error:can not execute[%s]'%cmdstr)
+
+ def set_target_chap(self,st_ip,volname):
+ iscsi_name=self.iqn_name + volname
+ self._discovery_targets(st_ip,volname)
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -o update --name node.session.auth.authmethod --value=CHAP'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:%s'%rslt)
+
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -o update --name node.session.auth.username --value=a12345678'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:%s'%rslt)
+
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -o update --name node.session.auth.password --value=a12345678'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:%s'%rslt)
+
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -l'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+ else:
+ logger.debug('OK:%s'%rslt)
+
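+ # Configure CHAP credentials for the discovered target and log in to it;
+ # raises on any iscsiadm failure (unlike set_target_chap, which only logs).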
+ def _connect_root_target(self,vol_name,st_ip,iscsi_name,iscsi_port):
+ self._discovery_targets(st_ip,vol_name)
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -o update --name node.session.auth.authmethod --value=CHAP'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -o update --name node.session.auth.username --value=a12345678'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -o update --name node.session.auth.password --value=a12345678'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -l'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ raise Exception('Error:%s'%rslt)
+
+ def clean_target_after_detach(self,pool,volname):
+
+ iscsi_name=self.iqn_name + volname
+ cmdstr='rm -f ' + self.target_path + '/' + pool + '/' + volname
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:%s'%rslt)
+
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -u'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:%s'%rslt)
+
+ cmdstr='iscsiadm -m node -T ' + iscsi_name + ' -o delete'
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:%s'%rslt)
+
+ cmdstr='surfs disexport ' + volname
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:%s'%rslt)
+
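+ # Query the current export record of a volume; returns the export data
+ # on success, or None after repeated failures or success=false.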
+ def get_exprot_msg(self,vol_id):
+ cmdstr='surfs getexport ' + vol_id
+ k =0
+ while True:
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ k = k +1
+ if k > 5:
+ return None
+ time.sleep(0.3)
+ continue
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ return None
+ else:
+ return rmsg["data"]
+
+ def del_export_acl(self,volname):
+ cmdstr='surfs delexportacl ' + self.iqn_name + volname + ' ' + self.init_name + ' ' + volname
+ ret,rslt=commands.getstatusoutput(cmdstr)
+ if ret !=0:
+ logger.debug('Error:%s'%rslt)
+
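+ # Re-establish the export/login/symlink chain for a root volume before a
+ # VM starts, re-exporting it when the previous export record is missing
+ # or belongs to another initiator.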
+ def start_vm_vol_resume(self,poolname,vol_id):
+ eptmsg=self.get_exprot_msg(vol_id)
+ storage_path=self.target_path + '/' + poolname + '/' + vol_id
+ if eptmsg is None:
+ self.export_root_target(self.init_name, vol_id, poolname)
+ else:
+ if self.init_name == eptmsg['InitiatorName']:
+ p_ip=self.get_volume_pool_ip(vol_id)
+ if p_ip is None:
+ logger.debug('Can not get info of the volume[%s]'%vol_id)
+ return
+ self._clean_overdue_link(storage_path)
+ self._clean_overdue_target(self.iqn_name, vol_id)
+ try:
+ self._discovery_targets(p_ip,vol_id)
+ self._connect_root_target(vol_id, p_ip, self.iqn_name + vol_id,self.iscsi_port)
+ self._link_target_to_rootpath(storage_path, vol_id, p_ip)
+ except:
+ self.export_root_target(self.init_name, vol_id, poolname)
+ else:
+ newip=self._check_target(self.iqn_name,
+ self.init_name,
+ 'a12345678:a12345678',
+ vol_id,
+ storage_path)
+ self._clean_overdue_target(self.iqn_name ,vol_id)
+ self._clean_overdue_link(storage_path)
+ self._connect_root_target(vol_id,
+ newip,
+ self.iqn_name + vol_id,
+ self.iscsi_port)
+ self._link_target_to_rootpath(storage_path,vol_id,newip)
+
+ def _parse_link_paths(self,linkstr):
+ lines=linkstr.split('\n')
+ for x in lines:
+ if 'part' in x:
+ continue
+ return x.split('-lun-')[1].split(':')[0]
+
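+ # Before a VM migration: remove the local symlink, clean up any stale
+ # target state and export ACL, then re-export the volume to this host's
+ # initiator.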
+ def migrate_vm_prepare(self,poolname,volname):
+ link_dir=self.target_path + '/' + poolname + '/' + volname
+ commands.getstatusoutput("rm -f " + link_dir)
+ '''
+ linkstrs=''
+ try:
+ linkstrs=self._find_target_path(volname)
+ except:
+ pass
+ '''
+ self._clean_overdue_target(self.iqn_name, volname)
+ self.del_export_acl(volname)
+
+ self.export_root_target(self.init_name, volname, poolname)
+
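+ # After a VM migration: remove the local symlink and, if the target
+ # device is still visible on this host, clean up the stale target state
+ # and export ACL.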
+ def migrate_vm_after(self,poolname,volname):
+ link_dir=self.target_path + '/' + poolname + '/' + volname
+ commands.getstatusoutput("rm -f " + link_dir)
+ linkstrs=''
+ try:
+ linkstrs=self._find_target_path(volname)
+ except:
+ pass
+ if len(linkstrs)> 0:
+ self._clean_overdue_target(self.iqn_name, volname)
+ self.del_export_acl(volname)
+
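+ # Run a shell command with a few retries; for surfs commands the parsed
+ # 'data' field is returned, otherwise the raw output. None means the
+ # command kept failing or surfs reported success=false.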
+ def _excute_cmd(self,cmd_str,surfstype=True):
+ k=0
+ while True:
+ ret,rslt=commands.getstatusoutput(cmd_str)
+ if ret !=0:
+ time.sleep(0.3)
+ k = k +1
+ if k > 5:
+ return None
+ continue
+ if surfstype is False:
+ return rslt
+ rmsg=json.loads(rslt)
+ if rmsg['success'] is False:
+ return None
+ else:
+ return rmsg["data"]
+
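+ # Make sure the data volume exists before attaching it: create it when
+ # 'surfs volume <id>' reports nothing, then briefly poll until the new
+ # volume becomes visible.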
+ def check_vol_before_attach(self,vol_id,vol_size,vol_type,ipstr=None):
+ if vol_size < 1:
+ vol_size=1
+ if vol_id is None:
+ return
+ v_type=self.back_type
+ if vol_type is not None:
+ v_type= vol_type
+ cmdstr='surfs volume ' + vol_id
+ if self._excute_cmd(cmdstr) is None:
+ self.create_data_volume(vol_id, str(vol_size), v_type,ipstr)
+ b=0
+ while True:
+ time.sleep(0.5)
+ if self._excute_cmd(cmdstr) is None:
+ b = b +1
+ if b > 5:
+ break
+ continue
+ else:
+ break
+
+ def get_vol_info(self,vol_id):
+ cmdstr='surfs volume ' + vol_id
+ return self._excute_cmd(cmdstr)
+
+ def check_nodeip_result(self,vol_name):
+ poolip=self.get_volume_pool_ip(vol_name)
+ if poolip is None:
+ return False
+ ipmsg=self._excute_cmd('ip a', surfstype=False)
+ if ipmsg is None:
+ return False
+ if poolip in ipmsg:
+ return True
+ else:
+ return False
+
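+ # When the volume's pool lives on this host, bypass iSCSI and symlink the
+ # local fileio backing file directly into the target path.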
+ def local_disk_link(self,fileio_path,vol_id,pool_name):
+ cmdstr=''
+ if os.path.exists(self.target_path + '/' + pool_name) is False:
+ cmdstr='mkdir ' + self.target_path + '/' + pool_name
+ self._excute_cmd(cmdstr, surfstype=False)
+ storage_path=self.target_path + '/' + pool_name + '/' + vol_id
+ cmdstr='rm -f ' + storage_path
+ self._excute_cmd(cmdstr, surfstype=False)
+ cmdstr='ln -s ' + fileio_path + ' ' + storage_path
+ self._excute_cmd(cmdstr, surfstype=False)
+
+class AgentResponse(object):
+ def __init__(self, success=True, error=None):
+ self.success = success
+ self.error = error if error else ''
+ self.totalCapacity = None
+ self.availableCapacity = None
+
+class AttachDataVolRsp(AgentResponse):
+ def __init__(self):
+ super(AttachDataVolRsp,self).__init__()
+ self.poolip=''
+ self.iscsiport=''
+ self.target=''
+ self.lun=''
+ self.devicetype='iscsi'
+
+class InitRsp(AgentResponse):
+ def __init__(self):
+ super(InitRsp, self).__init__()
+ self.fsid = None
+ self.userKey = None
+
+class DownloadRsp(AgentResponse):
+ def __init__(self):
+ super(DownloadRsp, self).__init__()
+ self.size = None
+
+class CpRsp(AgentResponse):
+ def __init__(self):
+ super(CpRsp, self).__init__()
+ self.size = None
+ self.actualSize = None
+
+class CreateSnapshotRsp(AgentResponse):
+ def __init__(self):
+ super(CreateSnapshotRsp, self).__init__()
+ self.size = None
+ self.actualSize = None
+
+class GetVolumeSizeRsp(AgentResponse):
+ def __init__(self):
+ super(GetVolumeSizeRsp, self).__init__()
+ self.size = None
+ self.actualSize = None
+
+class PingRsp(AgentResponse):
+ def __init__(self):
+ super(PingRsp, self).__init__()
+ self.operationFailure = False
+ self.fsid=''
+ self.poolclsmsg=''
+
+class GetFactsRsp(AgentResponse):
+ def __init__(self):
+ super(GetFactsRsp, self).__init__()
+ self.fsid = None
+
+def replyerror(func):
+ @functools.wraps(func)
+ def wrap(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception as e:
+ content = traceback.format_exc()
+ err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
+ rsp = AgentResponse()
+ rsp.success = False
+ rsp.error = str(e)
+ logger.warn(err)
+ return jsonobject.dumps(rsp)
+ return wrap
+
+class SurfsAgent(object):
+
+ INIT_PATH = "/surfs/primarystorage/init"
+ CREATE_VOLUME_PATH = "/surfs/primarystorage/volume/createempty"
+ DELETE_PATH = "/surfs/primarystorage/delete"
+ CLONE_PATH = "/surfs/primarystorage/volume/clone"
+ FLATTEN_PATH = "/surfs/primarystorage/volume/flatten"
+ SFTP_DOWNLOAD_PATH = "/surfs/primarystorage/sftpbackupstorage/download"
+ SFTP_UPLOAD_PATH = "/surfs/primarystorage/sftpbackupstorage/upload"
+ ECHO_PATH = "/surfs/primarystorage/echo"
+ CREATE_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/create"
+ DELETE_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/delete"
+ COMMIT_IMAGE_PATH = "/surfs/primarystorage/snapshot/commit"
+ PROTECT_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/protect"
+ ROLLBACK_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/rollback"
+ UNPROTECT_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/unprotect"
+ CP_PATH = "/surfs/primarystorage/volume/cp"
+ DELETE_POOL_PATH = "/surfs/primarystorage/deletepool"
+ GET_VOLUME_SIZE_PATH = "/surfs/primarystorage/getvolumesize"
+ PING_PATH = "/surfs/primarystorage/ping"
+ GET_FACTS = "/surfs/primarystorage/facts"
+ ATTACH_VOLUME_PREPARE = "/surfs/primarystorage/attachprepare"
+ DETACH_VOLUME_AFTER = "/surfs/primarystorage/detachafter"
+ START_VM_BEFORE = "/surfs/primarystorage/startvmbefore"
+ SURFS_MIGRATE_PREPARE = "/surfs/primarystorage/migrateprepare"
+ SURFS_MIGRATE_AFTER = "/surfs/primarystorage/migrateafter"
+
+ http_server = http.HttpServer(port=6731)
+ http_server.logfile_path = log.get_logfile_path()
+
+ def __init__(self):
+ self.http_server.register_async_uri(self.INIT_PATH, self.init)
+ self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
+ self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
+ self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
+ self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH, self.commit_image)
+ self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
+ self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
+ self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
+ self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
+ self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
+ self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
+ self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
+ self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
+ self.http_server.register_async_uri(self.CP_PATH, self.cp)
+ self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
+ self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
+ self.http_server.register_async_uri(self.PING_PATH, self.ping)
+ self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
+ self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
+ self.http_server.register_async_uri(self.ATTACH_VOLUME_PREPARE,self.attach_datavol_prepare)
+ self.http_server.register_async_uri(self.DETACH_VOLUME_AFTER,self.detach_datavol_after)
+ self.http_server.register_async_uri(self.START_VM_BEFORE,self.start_vm_before)
+ self.http_server.register_async_uri(self.SURFS_MIGRATE_PREPARE,self.migrate_vm_before)
+ self.http_server.register_async_uri(self.SURFS_MIGRATE_AFTER,self.migrate_vm_after)
+ self.fsid='surfsc48-2cef-454c-b0d0-b6e6b467c022'
+ self.surfs_mgr = SurfsCmdManage()
+
+ @replyerror
+ def init(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp = InitRsp()
+ rsp.fsid = self.fsid
+ self._set_capacity_to_response(rsp)
+ rsp.userKey = "AQDVyu9VXrozIhAAuT2yMARKBndq9g3W8KUQvw=="
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def delete(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ xmsg=self._parse_install_path(cmd.installPath)
+ pool=''
+ vol_name=''
+ if len(xmsg) == 2:
+ pool=xmsg[0]
+ vol_name=xmsg[1]
+ if len(xmsg) == 3:
+ pool=xmsg[1]
+ vol_name=xmsg[2]
+
+ self.surfs_mgr.delete_volume(vol_name)
+
+ rsp = AgentResponse()
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def migrate_vm_after(self,req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rootmsg=cmd.rootinstallPath.split("/")
+ datamsg=cmd.datainstallPath
+ rsp = AgentResponse()
+ if self.surfs_mgr.init_name is None:
+ rsp.success=False
+ rsp.error='cannot get the local iSCSI initiator name'
+ self.surfs_mgr.migrate_vm_after(rootmsg[2], rootmsg[3])
+ if datamsg is None or len(datamsg)==0:
+ return jsonobject.dumps(rsp)
+ else:
+ vdvls=datamsg.split(',')
+ for x in vdvls:
+ dvl=x.split(':')
+ if len(dvl) !=2:
+ continue
+ self.surfs_mgr.migrate_vm_after(dvl[0], dvl[1])
+
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def migrate_vm_before(self,req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rootmsg=cmd.rootinstallPath.split("/")
+ datamsg=cmd.datainstallPath
+ rsp = AgentResponse()
+ if self.surfs_mgr.init_name is None:
+ rsp.success=False
+ rsp.error='cannot get the local iSCSI initiator name'
+ devstrs=self.surfs_mgr.migrate_vm_prepare(rootmsg[2], rootmsg[3])
+
+ if datamsg is None or len(datamsg)==0:
+ return jsonobject.dumps(rsp)
+ else:
+ vdvls=datamsg.split(',')
+ for x in vdvls:
+ dvl=x.split(':')
+ if len(dvl) !=2:
+ continue
+ self.surfs_mgr.migrate_vm_prepare(dvl[0], dvl[1])
+
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def start_vm_before(self,req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ xmsg=cmd.installPath.split('/')
+ vmsg=cmd.volinstallPath
+ rsp = AgentResponse()
+ if self.surfs_mgr.init_name is None:
+ rsp.success=False
+ rsp.error='cannot get the local iSCSI initiator name'
+ self.surfs_mgr.start_vm_vol_resume(xmsg[2], xmsg[3])
+
+ if vmsg is None or len(vmsg)==0:
+ return jsonobject.dumps(rsp)
+ else:
+ vdvls=vmsg.split(',')
+ for x in vdvls:
+ dvl=x.split(':')
+ if len(dvl) !=2:
+ continue
+ if self.surfs_mgr.check_nodeip_result(dvl[1]) is True:
+ poolmsg=self.surfs_mgr.get_vol_info(dvl[1])
+ if poolmsg is None:
+ self.surfs_mgr.start_vm_vol_resume(dvl[0], dvl[1])
+ else:
+ fileio_dir='/' + poolmsg['pool'] + '/' + dvl[1] + '/fileio'
+ if os.path.exists(fileio_dir) is False:
+ self.surfs_mgr.start_vm_vol_resume(dvl[0], dvl[1])
+ else:
+ self.surfs_mgr.local_disk_link(fileio_dir,dvl[1],dvl[0])
+ else:
+ self.surfs_mgr.start_vm_vol_resume(dvl[0], dvl[1])
+
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def detach_datavol_after(self,req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp=AttachDataVolRsp()
+ xmsg=cmd.installPath.split('/')
+ if len(xmsg) != 4:
+ rsp.success=False
+ rsp.error='installPath[' + cmd.installPath + '] is invalid'
+ return jsonobject.dumps(rsp)
+ self.surfs_mgr.clean_target_after_detach(xmsg[2], xmsg[3])
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def attach_datavol_prepare(self,req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp=AttachDataVolRsp()
+ xmsg=cmd.installPath.split('/')
+ if len(xmsg) != 4:
+ rsp.success=False
+ rsp.error='installPath[' + cmd.installPath + '] is invalid'
+ return jsonobject.dumps(rsp)
+ self.surfs_mgr.check_vol_before_attach(xmsg[3], cmd.volsize, cmd.voltype,cmd.mgip)
+ vol_ip=self.surfs_mgr.get_volume_pool_ip(xmsg[3])
+ bsign=False
+ if vol_ip == cmd.mgip:
+ poolmsg=self.surfs_mgr.get_vol_info(xmsg[3])
+ if poolmsg is None:
+ bsign=True
+ else:
+ fileio_dir='/' + poolmsg['pool'] + '/' + xmsg[3] + '/fileio'
+ if os.path.exists(fileio_dir) is False:
+ bsign=True
+ else:
+ self.surfs_mgr.local_disk_link(fileio_dir, xmsg[3], xmsg[2])
+ rsp.devicetype='file'
+ else:
+ bsign=True
+
+ if bsign is True:
+ self.surfs_mgr.export_root_target(self.surfs_mgr.init_name, xmsg[3], xmsg[2])
+ self.surfs_mgr._find_target_path(xmsg[3])
+
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def create(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ _,pool,image_name = self._parse_install_path(cmd.installPath)
+
+ size_G = sizeunit.Byte.toGigaByte(cmd.size) + 1
+ size = "%dG" % (size_G)
+ v_type=self.surfs_mgr.back_type
+ try:
+ v_type=getattr(cmd, 'poolcls')
+ except:
+ logger.warn('Can not get attribute:poolcls')
+ #self.surfs_mgr.create_data_volume(image_name,size,v_type)
+ rsp = AgentResponse()
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def clone(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ _,src_vol_id = self._parse_install_path(cmd.srcPath)
+ _,pname,dst_vol_id = self._parse_install_path(cmd.dstPath)
+
+ _,src_id,src_type=src_vol_id.split('@')
+ if src_type == 'image':
+ self.surfs_mgr.clone_image(src_id, dst_vol_id)
+ else:
+ self.surfs_mgr.clone_vol(src_vol_id,dst_vol_id)
+ rsp = AgentResponse()
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def commit_image(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ _,_,s_id = self._parse_install_path(cmd.snapshotPath)
+ _,_,v_id = self._parse_install_path(cmd.dstPath)
+ self.surfs_mgr.create_vol_from_snap(s_id,v_id)
+ rsp = CpRsp()
+ rsp.size = self.surfs_mgr.get_vol_size(v_id)
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def create_snapshot(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ spath = self._normalize_install_path(cmd.snapshotPath)
+
+ do_create = True
+ imagename, sp_name,pooltype = spath.split('@')
+ xmsg=imagename.split('/')
+ image_name=''
+ if len(xmsg) == 2:
+ image_name=xmsg[1]
+ if len(xmsg) == 3:
+ image_name=xmsg[2]
+ rsp = CreateSnapshotRsp()
+ if pooltype == 'image':
+ pass
+ else:
+ if self.surfs_mgr.get_vol_info(image_name) is None:
+ rsp.success=False
+ rsp.error='The volume has never been attached to any VM'
+ return jsonobject.dumps(rsp)
+
+ if cmd.skipOnExisting:
+ if pooltype == 'image':
+ do_create = False
+ else:
+ snaps = self.surfs_mgr.get_snap_exist_byvol(image_name)
+ for s in snaps:
+ do_create = False
+
+ if do_create:
+ self.surfs_mgr.create_snapshot(image_name,sp_name)
+
+
+ if pooltype == 'image':
+ rsp.size= self.surfs_mgr.get_iamge_size(sp_name)
+ else:
+ rsp.size = self.surfs_mgr.get_vol_size(image_name)
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def delete_snapshot(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ spath = self._normalize_install_path(cmd.snapshotPath)
+ xmsg=spath.split('@')
+ sp_name=xmsg[1]
+ rsp = AgentResponse()
+ try:
+ self.surfs_mgr.delete_snapshot(sp_name)
+ self._set_capacity_to_response(rsp)
+ except Exception, e:
+ logger.debug('%s' % str(e))
+ rsp.success = False
+ rsp.error = str(e)
+ raise
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def protect_snapshot(self, req):
+ rsp = AgentResponse()
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def unprotect_snapshot(self, req):
+ return jsonobject.dumps(AgentResponse())
+
+ @replyerror
+ def rollback_snapshot(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ spath = self._normalize_install_path(cmd.snapshotPath)
+
+ xmsg=spath.split('@')
+ image=xmsg[0].split('/')[2]
+ snap=xmsg[1]
+ afters = self.surfs_mgr.get_after_snaps(image,snap)
+ rsp = AgentResponse()
+
+ if (len(afters) > 0):
+ afters.reverse()
+ rsp.success = False
+ rsp.error = 'If you want to roll back, please delete the later snapshots [%s] first' % (afters)
+ else:
+ self.surfs_mgr.rollback_from_snap(snap,image)
+ self._set_capacity_to_response(rsp)
+
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def flatten(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ path = self._normalize_install_path(cmd.path)
+
+ rsp = AgentResponse()
+ rsp.success = False
+ rsp.error = 'unsupported flatten'
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def sftp_upload(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+
+ src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
+ prikey_file = linux.write_to_temp_file(cmd.sshKey)
+ bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
+
+ rsp = AgentResponse()
+ rsp.success = False
+ rsp.error = 'SimpleSftpBackupStorage is not supported; only the surfs backup storage is supported'
+ return jsonobject.dumps(rsp)
+
+
+ @replyerror
+ @rollback
+ def sftp_download(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+
+ hostname = cmd.hostname
+ prikey = cmd.sshKey
+ _,pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)
+ tmp_image_name = 'tmp-%s' % image_name
+ prikey_file = linux.write_to_temp_file(prikey)
+
+ rsp = AgentResponse()
+ rsp.success = False
+ rsp.error = 'SimpleSftpBackupStorage is not supported; only the surfs backup storage is supported'
+ #self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def cp(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ _,_,s_id = self._parse_install_path(cmd.srcPath)
+ _,_,v_id = self._parse_install_path(cmd.dstPath)
+ self.surfs_mgr.clone_vol(s_id,v_id)
+
+ rsp = CpRsp()
+ rsp.size = self.surfs_mgr.get_vol_size(v_id)
+ self._set_capacity_to_response(rsp)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def delete_pool(self, req):
+ return jsonobject.dumps(AgentResponse())
+
+ @replyerror
+ def get_volume_size(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ _,_,vol_id = self._parse_install_path(cmd.installPath)
+ rsp = GetVolumeSizeRsp()
+ rsp.size = self.surfs_mgr.get_vol_size(vol_id)
+ rsp.actualSize = self.surfs_mgr.get_vol_used_size(vol_id)
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def ping(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp = PingRsp()
+ rsp.fsid=self.fsid
+ poolsmsg=''
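+ # poolclsmsg aggregates capacity per pool class as a comma-separated
+ # list of 'class:total:free:status' entries.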
+ if cmd.testImagePath:
+ rmsg=self.surfs_mgr.get_pool_msg()
+ if rmsg is None:
+ rsp.success = False
+ rsp.operationFailure = True
+ rsp.error = "can not do surfs connect"
+ logger.debug("%s" % rsp.error)
+ else:
+ if len(rmsg)> 0:
+ for rsg in rmsg:
+ status='True'
+ if rsg['success'] is False:
+ rsp.success = True
+ rsp.operationFailure = False
+ rsp.error = "Surfs is ready,but pool is breaken"
+ status='false'
+ logger.debug("Surfs is ready,but pool is breaken")
+ if poolsmsg=='':
+ poolsmsg=rsg['class'] + ':' + str(rsg['total']) + ':' + str(rsg['free']) + ':' + status
+ else:
+ pools=poolsmsg.split(',')
+ tmpmsg=''
+ if rsg['class'] + ':' in poolsmsg:
+ for xl in pools:
+ pl=xl.split(':')
+ if rsg['class']== pl[0]:
+ pl[1]=str(long(pl[1]) + rsg['total'])
+ pl[2]=str(long(pl[2]) + rsg['free'])
+ pl[3]=status
+ if tmpmsg=='':
+ tmpmsg=pl[0] + ':' + pl[1] + ':' + pl[2] + ':' + pl[3]
+ else:
+ tmpmsg=tmpmsg + ',' + pl[0] + ':' + pl[1] + ':' + pl[2] + ':' + pl[3]
+ poolsmsg=tmpmsg
+ else:
+ poolsmsg=poolsmsg + ',' + rsg['class'] + ':' + str(rsg['total']) + ':' + str(rsg['free']) + ':' + status
+ rsp.poolclsmsg=poolsmsg
+ else:
+ rsp.success = False
+ rsp.operationFailure = True
+ rsp.error = "Surfs is ready,but pool is Null"
+ logger.debug("Surfs is ready,but pool is Null")
+
+ return jsonobject.dumps(rsp)
+
+
+ def _parse_install_path(self, path):
+ return self._normalize_install_path(path).split('/')
+
+ def _set_capacity_to_response(self, rsp):
+ cmdstr='surfs connect'
+ total = 0
+ used = 0
+ rmsg=self.surfs_mgr.get_pool_msg()
+ for pl in rmsg:
+ if pl["success"] is True:
+ total=total + pl["total"]
+ used=used + pl["used"]
+ rsp.totalCapacity = total
+ rsp.availableCapacity = total - used
+
+ def _get_file_size(self, path):
+ pass
+
+ def _get_file_actual_size(self, path):
+ pass
+
+ @replyerror
+ def get_facts(self, req):
+ cmd = jsonobject.loads(req[http.REQUEST_BODY])
+ rsp = GetFactsRsp()
+ rsp.fsid = self.fsid
+ return jsonobject.dumps(rsp)
+
+ @replyerror
+ def echo(self, req):
+ logger.debug('get echoed')
+ return ''
+
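+ # Strip the leading 'surfs:' scheme and slashes from an install path,
+ # e.g. 'surfs://pool/volume' becomes 'pool/volume'.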
+ def _normalize_install_path(self, path):
+ return path.lstrip('surfs:').lstrip('//')
+
+
+class SurfsDaemon(daemon.Daemon):
+ def __init__(self, pidfile):
+ super(SurfsDaemon, self).__init__(pidfile)
+
+ def run(self):
+ self.agent = SurfsAgent()
+ self.agent.http_server.start()
+
diff --git a/surfsprimarystorage/zstack-surfs-primarystorage b/surfsprimarystorage/zstack-surfs-primarystorage
new file mode 100755
index 0000000000..aaa0fcd6ea
--- /dev/null
+++ b/surfsprimarystorage/zstack-surfs-primarystorage
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+# the following is chkconfig init header
+#
+# zstack-surfs-primarystorage: zstack surfs primary storage agent daemon
+#
+# chkconfig: 345 97 03
+# description: This is a daemon instructed by zstack management server \
+# to perform primary storage related operations\
+# See http://zstack.org
+#
+# processname: zstack-surfs-primarystorage
+# pidfile: /var/run/zstack/surfs-primarystorage.pid
+#
+
+check_status() {
+ pidfile='/var/run/zstack/surfs-primarystorage.pid'
+ if [ ! -f $pidfile ]; then
+ echo "zstack surfs-primarystorage agent is stopped"
+ exit 1
+ else
+ pid=`cat $pidfile`
+ ps -p $pid > /dev/null
+ if [ $? -eq 0 ]; then
+ echo "zstack surfs-primarystorage agent is running, pid is $pid"
+ exit 0
+ else
+ echo "zstack surfs-primarystorage is stopped, but pidfile at $pidfile is not cleaned. It may be caused by the agent crashed at last time, manually cleaning it would be ok"
+ exit 1
+ fi
+ fi
+}
+
+if [ $# -eq 0 ]; then
+ echo "usage: $0
+[start|stop|restart|status]"
+ exit 1
+fi
+
+if [ "$@" = "status" ]; then
+ check_status
+else
+ . /var/lib/zstack/virtualenv/surfsp/bin/activate && python -c "from surfsprimarystorage import cdaemon; cdaemon.main()" $@
+fi
+
+if [ $? -eq 0 ]; then
+ echo "$@ zstack surfs-primarystorage agent .... SUCCESS"
+ exit 0
+else
+ echo "$@ zstack surfs-primarystorage agent .... FAILED"
+ exit 1
+fi
diff --git a/zstackbuild/projects/zstack-surfsb.xml b/zstackbuild/projects/zstack-surfsb.xml
new file mode 100644
index 0000000000..eceb63ec01
--- /dev/null
+++ b/zstackbuild/projects/zstack-surfsb.xml
@@ -0,0 +1,46 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/zstackbuild/projects/zstack-surfsp.xml b/zstackbuild/projects/zstack-surfsp.xml
new file mode 100644
index 0000000000..aa0fd92ccc
--- /dev/null
+++ b/zstackbuild/projects/zstack-surfsp.xml
@@ -0,0 +1,46 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+