systemctl stop firewalld.service
systemctl disable firewalld
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
# Alternative to disabling the firewall: allow the 192.168.16.0/24 subnet to reach all TCP/UDP ports
firewall-cmd --permanent --zone=public --add-rich-rule='rule family=ipv4 source address=192.168.16.0/24 port port=1-65535 protocol=tcp accept'
firewall-cmd --permanent --zone=public --add-rich-rule='rule family=ipv4 source address=192.168.16.0/24 port port=1-65535 protocol=udp accept'
firewall-cmd --reload
firewall-cmd --list-all
ssh-keygen
ssh-copy-id 192.168.16.82
ssh-copy-id 192.168.16.83
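To confirm password-less SSH works before continuing:
ssh 192.168.16.82 hostname # should print the remote hostname without a password prompt
ssh 192.168.16.83 hostname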
yum -y install centos-release-openstack-stein
vim /etc/hosts
192.168.16.81 controller
192.168.16.82 compute01
192.168.16.83 compute02
scp /etc/hosts 192.168.16.82:/etc/
scp /etc/hosts 192.168.16.83:/etc/
On 192.168.16.81 run: hostnamectl set-hostname controller
On 192.168.16.82 run: hostnamectl set-hostname compute01
On 192.168.16.83 run: hostnamectl set-hostname compute02
yum -y install chrony
timedatectl set-timezone Asia/Shanghai
vim /etc/chrony.conf
# Add the following
allow 192.168.16.0/24
local stratum 10
# Comment out the default "server ..." entries and add:
server ntp.aliyun.com iburst
systemctl enable chronyd.service
systemctl start chronyd.service
timedatectl set-ntp yes
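Verify on the controller that chrony has selected the upstream server:
chronyc sources # ntp.aliyun.com should carry the '^*' (selected) marker once synced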
yum -y install chrony
vim /etc/chrony.conf
# Add "server controller iburst" and delete or comment out the other server entries
server controller iburst
systemctl enable chronyd.service
systemctl restart chronyd.service
chronyc sources
ntpdate -u 192.168.16.81 # manually sync time from the controller
# If time stays out of sync, check the timezone and set it to Shanghai
timedatectl set-timezone Asia/Shanghai
sed -i '$a exclude=kernel* centos-release' /etc/yum.conf # pin kernel and centos-release so yum update does not replace them
yum -y update
yum -y install python-openstackclient
yum -y install mariadb mariadb-server python2-PyMySQL
vim /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.16.81
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
# bind-address above is the controller node's management IP
# Optional: the [client] section below enables password-less mysql login
[client]
user = root
password = hf3366++
systemctl enable mariadb.service
systemctl start mariadb.service
mysql_secure_installation # initialize and secure the database
# Press Enter for the current root password, then: y (set a password), y, n, n, y
vim /usr/lib/systemd/system/mariadb.service
# Add under the [Service] section
LimitNOFILE=65535
LimitNPROC=65535
# Add the following settings to the config files below
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
vim /etc/pam.d/login
session required /lib64/security/pam_limits.so
vim /etc/sysctl.conf
fs.file-max = 65536
sysctl -p # apply the kernel settings
systemctl daemon-reload
systemctl restart mariadb.service
mysql -u root -p
show variables like 'max_connections'; # query the configured maximum connections
show global status like 'Max_used_connections'; # query the peak number of connections used so far
yum -y install rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
# Create the user and grant permissions
rabbitmqctl add_user openstack hf3366++
rabbitmqctl set_user_tags openstack administrator
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
rabbitmqctl list_user_permissions openstack # view the user's permissions
# Enable browser access to the management UI
rabbitmq-plugins enable rabbitmq_management
http://192.168.16.81:15672/
# Raise rabbitmq's default file-descriptor limit
vim /usr/lib/systemd/system/rabbitmq-server.service
# Add:
[Service]
LimitNOFILE=16384
systemctl daemon-reload
systemctl restart rabbitmq-server
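To confirm the new limit took effect (output layout varies slightly across RabbitMQ versions):
rabbitmqctl status | grep -A 3 file_descriptors # total_limit should now reflect LimitNOFILE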
yum -y install memcached python-memcached
vim /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,controller"
# Append ",controller" after 127.0.0.1,::1 so the service also listens on the controller node's management IP
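The service must then be enabled and started so the token cache is actually available:
systemctl enable memcached.service
systemctl start memcached.service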
yum -y install etcd
cp /etc/etcd/etcd.conf{,.bak}
vim /etc/etcd/etcd.conf
# Set the following 9 parameters and comment out everything else.
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.16.81:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.16.81:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.16.81:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.16.81:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.16.81:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
systemctl enable etcd
systemctl start etcd
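A minimal health check, assuming the stock CentOS etcd package (whose etcdctl defaults to the v2 API):
etcdctl --endpoints=http://192.168.16.81:2379 cluster-health # expect "cluster is healthy"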
mysql -u root -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
yum -y install openstack-keystone httpd mod_wsgi
cp /etc/keystone/keystone.conf{,.bak}
vim /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:hf3366++@controller/keystone
[token]
provider = fernet
su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# Verify
ls /etc/keystone/ | egrep "fernet-key"
tree /etc/keystone/fernet-keys/
keystone-manage bootstrap --bootstrap-password hf3366++ --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
vim /etc/httpd/conf/httpd.conf
ServerName controller
# Commented out by default; uncomment and set it manually
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
3. Configure the services to start at boot
systemctl enable httpd.service
systemctl start httpd.service
4. Create an environment-variable script
vim admin-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=hf3366++
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
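To verify the script and the bootstrap, source it and request a token:
source admin-openrc
openstack token issue # a table with a token id confirms Keystone is working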
1. Create a new domain
openstack domain create --description "An Example Domain" example
2. Create the service project
openstack project create --domain default --description "Service Project" service
3. Create a regular project myproject
openstack project create --domain default --description "Demo Project" myproject
4. Create a regular user myuser (for non-admin use)
openstack user create --domain default --password hf3366++ myuser
5. Create a role
openstack role create myrole
6. Add the myrole role to the myproject project and the myuser user
openstack role add --project myproject --user myuser myrole
openstack user list
openstack role list
openstack project list
openstack role assignment list
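A matching script for the regular account is handy for testing non-admin access; a sketch mirroring admin-openrc and using the myuser/myproject created above (the file name demo-openrc is arbitrary):
vim demo-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=hf3366++
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2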
mysql -u root -p
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
1. Create the placement user
openstack user create --domain default --password hf3366++ placement
2. Add the admin role and create the service entity
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
3. Create the Placement API service endpoints
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
yum -y install openstack-placement-api
cp /etc/placement/placement.conf{,.bak}
vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:hf3366++@controller/placement
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = placement
password = hf3366++
vim /etc/httpd/conf.d/00-placement-api.conf
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
# Add this block below the commented "#SSLCertificateKeyFile ..." line
su -s /bin/sh -c "placement-manage db sync" placement
mysql -e "use placement;show tables;" -u placement -p # verify
systemctl restart httpd
placement-status upgrade check
yum install -y epel-release
yum install -y python-pip
rm -rf /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel-testing.repo
1. Install the osc-placement plugin
pip install osc-placement
2. List the available resource classes and traits
openstack --os-placement-api-version 1.2 resource class list --sort-column name
openstack --os-placement-api-version 1.6 trait list --sort-column name
mysql -u root -p
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
1. Create the glance user
openstack user create --domain default --password hf3366++ glance
2. Add the admin role to the glance user
openstack role add --project service --user glance admin
3. Create the service entity
openstack service create --name glance --description "OpenStack Image" image
4. Create the Image service API endpoints
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
yum -y install openstack-glance
cp /etc/glance/glance-api.conf{,.bak}
vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:hf3366++@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = hf3366++
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
cp /etc/glance/glance-registry.conf{,.bak}
vim /etc/glance/glance-registry.conf
[database]
connection = mysql+pymysql://glance:hf3366++@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = hf3366++
[paste_deploy]
flavor = keystone
su -s /bin/sh -c "glance-manage db_sync" glance
mysql glance -e "show tables;" -u glance -p # verify
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
1. Download the image
wget https://download.cirros-cloud.net/0.5.1/cirros-0.5.1-aarch64-disk.img
2. Upload the image
openstack image create "cirros" --file cirros-0.5.1-aarch64-disk.img --disk-format qcow2 --container-format bare --public
3. Verify
openstack image list
mysql -u root -p
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
1. Create the nova user
openstack user create --domain default --password hf3366++ nova
2. Add the admin role to the nova user
openstack role add --project service --user nova admin
3. Create the nova service entity
openstack service create --name nova --description "OpenStack Compute" compute
4. Create the Compute API service endpoints
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler
cp /etc/nova/nova.conf{,.bak}
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hf3366++@controller
my_ip = 192.168.16.81
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
allow_resize_to_same_host = true
[api_database]
connection = mysql+pymysql://nova:hf3366++@controller/nova_api
[database]
connection = mysql+pymysql://nova:hf3366++@controller/nova
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hf3366++
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
novncproxy_host=0.0.0.0
novncproxy_port=6080
novncproxy_base_url=http://192.168.16.81:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = hf3366++
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
# Verify
mysql nova_api -e "show tables;" -u nova -p
mysql nova -e "show tables;" -u nova -p
mysql nova_cell0 -e "show tables;" -u nova -p
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
openstack compute service list
mysql -u root -p
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
1. Create the neutron user
openstack user create --domain default --password hf3366++ neutron
2. Add the admin role to the neutron user
openstack role add --project service --user neutron admin
3. Create the neutron service entity
openstack service create --name neutron --description "OpenStack Networking" network
4. Create the Networking service API endpoints
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
cp /etc/neutron/neutron.conf{,.bak}
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:hf3366++@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[database]
connection = mysql+pymysql://neutron:hf3366++@controller/neutron
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = hf3366++
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = hf3366++
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:enp125s0f1
[vxlan]
enable_vxlan = true
local_ip = 192.168.16.81
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# local_ip: the first NIC's IP, used for internal (overlay) traffic
# physical_interface_mappings = provider:enp125s0f1 : the externally connected NIC, used for allocating floating IPs
cp /etc/neutron/l3_agent.ini{,.bak}
vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
cp /etc/neutron/dhcp_agent.ini{,.bak}
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
cp /etc/neutron/metadata_agent.ini{,.bak}
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = hf3366++
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
modprobe br_netfilter
sysctl -p
sed -i '$amodprobe br_netfilter' /etc/rc.local
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = hf3366++
service_metadata_proxy = true
metadata_proxy_shared_secret = hf3366++
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
mysql neutron -e "show tables;" -u neutron -p # verify
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl enable neutron-l3-agent.service
systemctl start neutron-l3-agent.service
openstack network agent list # check the agents
1. Install dependencies
yum -y install glib2-devel zlib-devel pixman-devel librbd-devel libaio-devel
# or (to also build USB redirection and SDL support)
yum -y install glib2-devel zlib-devel pixman-devel librbd-devel libaio-devel libusbx-devel usbredir-devel SDL2-devel
2. Download the source
wget https://download.qemu.org/qemu-4.0.0.tar.xz
3. Build and install
tar -xvf qemu-4.0.0.tar.xz
cd qemu-4.0.0
./configure --enable-rbd --enable-linux-aio
# or
./configure --enable-rbd --enable-linux-aio --enable-spice
# or
./configure --enable-rbd --enable-linux-aio --enable-spice --enable-usb-redir --enable-libusb --enable-sdl --audio-drv-list='sdl'
make -j 50
make install -j 20
4. Register the library path
sed -i '$ainclude /usr/local/lib' /etc/ld.so.conf
5. Apply the change
ldconfig
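Assuming the default /usr/local install prefix, the new build can be checked before moving on:
/usr/local/bin/qemu-system-aarch64 --version # should report QEMU emulator version 4.0.0
/usr/local/bin/qemu-img --version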
1. Install edk2
wget https://www.kraxel.org/repos/firmware.repo -O /etc/yum.repos.d/firmware.repo
yum -y install edk2.git-aarch64
2. Install dependencies
yum -y install gnutls-devel libnl-devel libxml2-devel yajl-devel device-mapper-devel libpciaccess-devel
3. Download the source
wget https://libvirt.org/sources/libvirt-5.6.0-1.fc30.src.rpm -O /root/libvirt-5.6.0-1.fc30.src.rpm
4. Build
cd /root/
rpm -i libvirt-5.6.0-1.fc30.src.rpm
yum -y install libxml2-devel readline-devel ncurses-devel libtasn1-devel gnutls-devel libattr-devel libblkid-devel augeas systemd-devel libpciaccess-devel yajl-devel sanlock-devel libpcap-devel libnl3-devel libselinux-devel dnsmasq radvd cyrus-sasl-devel libacl-devel parted-devel device-mapper-devel xfsprogs-devel librados2-devel librbd1-devel glusterfs-api-devel glusterfs-devel numactl-devel libcap-ng-devel fuse-devel netcf-devel libcurl-devel audit-libs-devel systemtap-sdt-devel nfs-utils dbus-devel scrub numad qemu-img rpm-build
rpmbuild -ba ~/rpmbuild/SPECS/libvirt.spec
If that fails, rebuild directly from the source RPM instead:
rpmbuild --rebuild /root/libvirt-5.6.0-1.fc30.src.rpm
5. Install
yum install -y /root/rpmbuild/RPMS/aarch64/*.rpm
6. Restart the libvirt service
systemctl restart libvirtd
7. Edit the config file "/etc/libvirt/qemu.conf"
vim /etc/libvirt/qemu.conf
nvram = ["/usr/share/AAVMF/AAVMF_CODE.fd:/usr/share/AAVMF/AAVMF_VARS.fd","/usr/share/edk2.git/aarch64/QEMU_EFI-pflash.raw:/usr/share/edk2.git/aarch64/vars-template-pflash.raw"]
8. Check the libvirt and QEMU versions
virsh version
# For Windows (x86_64) guests, install the OVMF firmware instead and point nvram at it
yum install -y edk2.git-ovmf-x64
nvram = ["/usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd:/usr/share/edk2.git/ovmf-x64/OVMF_VARS-pure-efi.fd", ]
1. Install the component
yum -y install openstack-nova-compute
2. Edit /etc/nova/nova.conf
cp /etc/nova/nova.conf{,.bak}
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hf3366++@controller
my_ip = 192.168.16.82
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hf3366++
[libvirt]
virt_type = kvm
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = hf3366++
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
1. List the compute hosts known to the database
openstack compute service list --service nova-compute
2. Discover the new hosts
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
3. Enable automatic host discovery
vim /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
4. Restart
systemctl restart openstack-nova-api.service
1. List the service components
openstack compute service list
2. List the API endpoints in the Identity service to verify connectivity to it
openstack catalog list
3. List the images in the Glance service
openstack image list
4. Check that the cells and the Placement API are working and that the other prerequisites are in place
nova-status upgrade check
yum install -y openstack-neutron-linuxbridge ebtables ipset
cp /etc/neutron/neutron.conf{,.bak}
vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:hf3366++@controller
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = hf3366++
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:enp125s0f1
[vxlan]
enable_vxlan = true
local_ip = 192.168.16.82 # this compute node's management IP (192.168.16.83 on compute02)
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = hf3366++
systemctl restart openstack-nova-compute.service # restart
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
modprobe br_netfilter
sysctl -p
sed -i '$amodprobe br_netfilter' /etc/rc.local
systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
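Back on the controller (with admin-openrc sourced), the new agent should appear:
openstack network agent list # a Linux bridge agent entry should now exist for this compute node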
yum -y install openstack-dashboard
Edit the config file "/etc/openstack-dashboard/local_settings" and complete the following steps:
cp /etc/openstack-dashboard/local_settings{,.bak}
vim /etc/openstack-dashboard/local_settings
1. Configure the dashboard to use the OpenStack services on the controller node
OPENSTACK_HOST = "controller"
2. Allow access from all hosts; mind the format, there is a space after the comma
ALLOWED_HOSTS = ['*', ]
3. Configure session storage (file-backed sessions with a memcached cache); comment out any other session storage configuration and mind the format
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
4. Enable Identity API version 3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
5. Enable support for domains
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
6. Configure the API versions; mind the format
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 3,
}
7. Configure Default as the default domain for users created via the dashboard
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
8. Configure user as the default role for users created via the dashboard
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
9. Disable support for layer-3 networking services
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': False,
'enable_quotas': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'enable_fip_topology_check': False,
}
10. Configure the timezone
TIME_ZONE = "Asia/Shanghai"
vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
# Add below the "WSGISocketPrefix run/wsgi" line
4. Give the apache user and group ownership of "/usr/share/openstack-dashboard/"
chown -R apache:apache /usr/share/openstack-dashboard/
5. Restart the web server and session storage service
systemctl restart httpd.service memcached.service
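The dashboard should now be reachable; with the CentOS packages it is served under the /dashboard path:
http://192.168.16.81/dashboard # log in with domain Default, user admin, password hf3366++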
openstack network create --share --external --provider-physical-network provider --provider-network-type flat enp125s0f1
openstack subnet create --network provider --allocation-pool start=192.167.0.3,end=192.167.0.250 --dns-nameserver 114.114.114.114 --gateway 192.167.0.1 --subnet-range 192.167.0.0/19 subnet1
neutron net-list
neutron subnet-list
openstack flavor create --vcpus 1 --ram 64 --disk 1 m1.nano
Create the instance from the dashboard.
After it is created, verify:
openstack server list
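The same instance can also be launched from the CLI instead of the dashboard; a sketch using the flavor, image and network created above (the server name test-vm is arbitrary):
openstack server create --flavor m1.nano --image cirros --network enp125s0f1 test-vm
openstack server show test-vm # status should reach ACTIVE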
mysql -u root -p
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
1. Create the service credentials: the cinder user
openstack user create --domain default --password hf3366++ cinder
2. Add the admin role to the cinder user
openstack role add --project service --user cinder admin
3. Create the cinderv2 and cinderv3 service entities
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
4. Create the Block Storage service API endpoints
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
yum -y install openstack-cinder
cp /etc/cinder/cinder.conf{,.bak}
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:hf3366++@controller
auth_strategy = keystone
my_ip = 192.168.16.81
enabled_backends = ceph
[database]
connection = mysql+pymysql://cinder:hf3366++@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = hf3366++
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
su -s /bin/sh -c "cinder-manage db sync" cinder
# the deprecation warnings printed here can be ignored
mysql cinder -e "show tables;" -u cinder -p # verify
vim /etc/nova/nova.conf
# Add the following
[cinder]
os_region_name = RegionOne
1. Restart the Compute API service
systemctl restart openstack-nova-api.service
2. Start the Block Storage services and configure them to start at boot
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service openstack-cinder-backup.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service openstack-cinder-backup.service
3. Verify the controller installation; a state of "up" means the service is healthy
openstack volume service list
# cinder-volume is down because the ceph services have not yet been enabled and integrated with cinder-volume
cinder type-list
cinder type-create ceph
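For the scheduler to place volumes of this type on the ceph backend, the type should also carry the backend name; a sketch assuming it matches the volume_backend_name = ceph set in the [ceph] section later:
openstack volume type set --property volume_backend_name=ceph ceph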
yum -y install openstack-cinder targetcli python-keystone
cp /etc/cinder/cinder.conf{,.bak}
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:hf3366++@controller
auth_strategy = keystone
my_ip = 192.168.16.82
enabled_backends = ceph
glance_api_servers = http://controller:9292
[database]
connection = mysql+pymysql://cinder:hf3366++@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = hf3366++
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
# On compute02, back up the default config first
cp /etc/cinder/cinder.conf{,.bak}
# Then copy the configured file over from compute01 and change only my_ip
scp /etc/cinder/cinder.conf compute02:/etc/cinder/
vim /etc/cinder/cinder.conf
my_ip = 192.168.16.83
systemctl enable openstack-cinder-volume.service target.service openstack-cinder-backup.service
systemctl start openstack-cinder-volume.service target.service openstack-cinder-backup.service
# Restart the services on the controller node
systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
openstack volume service list
# The backend is now ceph, but the ceph services are not yet enabled and integrated with cinder-volume, so the cinder-volume state shows "down"
vim /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://download.ceph.com/rpm-nautilus/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[Ceph-source]
name=Ceph source packages
baseurl=http://download.ceph.com/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
yum clean all && yum makecache
yum install ceph-deploy python-setuptools
ceph-deploy --version
mkdir /ceph-deploy && cd /ceph-deploy
ceph-deploy new controller --public-network 192.168.16.0/24 --cluster-network 192.168.16.0/24
# --public-network: the client-facing network, the entry point for accessing ceph
# --cluster-network: used for data replication between OSDs
# Verify
ll
-rw-r--r--. 1 root root  268 Dec  2 10:54 ceph.conf
-rw-r--r--. 1 root root 3090 Dec  2 10:54 ceph-deploy-ceph.log
-rw-------. 1 root root   73 Dec  2 10:54 ceph.mon.keyring
cat ceph.conf
[global]
fsid = 554ef778-c7fd-4c7f-a1e3-fb9e7fb4afd5
public_network = 192.168.16.0/24
cluster_network = 192.168.16.0/24
mon_initial_members = controller
mon_host = 192.168.16.81
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
vim ceph.conf
# Add under the [global] section:
mon clock drift allowed = 2
mon clock drift warn backoff = 30
# This installs ceph, ceph-mgr, ceph-mon, ceph-radosgw and ceph-mds
yum -y install ceph
1. Push the config file to every node (lands in /etc/ceph by default)
ceph-deploy --overwrite-conf config push controller compute01 compute02
2. Create the mon, designating controller as the monitor
ceph-deploy --overwrite-conf mon create controller
3. Initialize the mon
# Initialization sets up all the mon-related configuration automatically
ceph-deploy mon create-initial
ls -l *.keyring # the keyrings generated by initialization
-rw-------. 1 root root 113 Dec  2 11:28 ceph.bootstrap-mds.keyring
-rw-------. 1 root root 113 Dec  2 11:28 ceph.bootstrap-mgr.keyring
-rw-------. 1 root root 113 Dec  2 11:28 ceph.bootstrap-osd.keyring
-rw-------. 1 root root 113 Dec  2 11:28 ceph.bootstrap-rgw.keyring
-rw-------. 1 root root 151 Dec  2 11:28 ceph.client.admin.keyring
-rw-------. 1 root root  73 Dec  2 10:54 ceph.mon.keyring
4. Make the mon highly available
ceph-deploy mon add compute01 --address 192.168.16.82
ceph-deploy mon add compute02 --address 192.168.16.83
5. Copy the configuration to every node
ceph-deploy --overwrite-conf config push controller compute01 compute02
ceph-deploy admin controller compute01 compute02
6. Query the mon status
ceph -s
mon: 3 daemons, quorum controller,compute01,compute02 (age 2m)
7. Check the quorum status
ceph quorum_status --format json-pretty # the "quorum_names" field shows which nodes are in the quorum
8. Install the Ceph OSDs
ceph-deploy disk zap controller /dev/sdb
ceph-deploy osd create controller --data /dev/sdb
#compute01
ceph-deploy disk zap compute01 /dev/sdb
ceph-deploy osd create compute01 --data /dev/sdb
ceph-deploy disk zap compute01 /dev/sdc
ceph-deploy osd create compute01 --data /dev/sdc
ceph-deploy disk zap compute01 /dev/sdd
ceph-deploy osd create compute01 --data /dev/sdd
#compute02
ceph-deploy disk zap compute02 /dev/sdb
ceph-deploy osd create compute02 --data /dev/sdb
ceph-deploy disk zap compute02 /dev/sdc
ceph-deploy osd create compute02 --data /dev/sdc
ceph-deploy disk zap compute02 /dev/sdd
ceph-deploy osd create compute02 --data /dev/sdd
9. Deploy mgr in HA mode
ceph-deploy --overwrite-conf mgr create controller compute01 compute02
# Since Luminous a mgr must be running, otherwise "ceph -s" warns "no active mgr"; the official docs recommend one mgr on every monitor
10. Check the OSD status
ceph osd tree
# Every STATUS must be "up"; "down" means something is wrong with ceph
11. Check the monitor status
ceph mon stat
e3: 3 mons at {compute01=[v2:192.168.16.82:3300/0,v1:192.168.16.82:6789/0],compute02=[v2:192.168.16.83:3300/0,v1:192.168.16.83:6789/0],controller=[v2:192.168.16.81:3300/0,v1:192.168.16.81:6789/0]}, election epoch 22, leader 0 controller, quorum 0,1,2 controller,compute01,compute02
ceph osd pool create volumes 128
ceph osd pool create images 128
ceph osd pool create backups 128
ceph osd pool create vms 128
ceph osd pool ls # list the pools
cd /etc/ceph/
UUID=$(uuidgen)
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>${UUID}</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
scp /etc/ceph/secret.xml compute01:/etc/ceph/
scp /etc/ceph/secret.xml compute02:/etc/ceph/
ceph osd pool application enable images rbd
vim /etc/glance/glance-api.conf
[DEFAULT]
show_image_direct_url = True
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
[paste_deploy]
flavor = keystone
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring
chgrp glance /etc/ceph/ceph.client.glance.keyring
chmod 0640 /etc/ceph/ceph.client.glance.keyring
vim ceph.conf
[client.glance]
keyring = /etc/ceph/ceph.client.glance.keyring
# On the controller, register the keyring in the ceph config by appending the section above to the very bottom of the file
ceph-deploy --overwrite-conf config push controller compute01 compute02
# push the updated config to the other nodes
systemctl restart openstack-glance-api.service
1. Upload the image
openstack image create "cirros" --file cirros-0.5.1-aarch64-disk.img --disk-format qcow2 --container-format bare --public
2. Check
glance image-list
3. Check in the ceph cluster
rbd ls images
rbd -p images info 520be008-c4f4-418f-9176-9b0e03d20b72
4. List the snapshots
rbd snap list images/520be008-c4f4-418f-9176-9b0e03d20b72
5. Show the snapshot details (note the protected status)
rbd info images/520be008-c4f4-418f-9176-9b0e03d20b72@snap
ceph osd pool application enable volumes rbd
ceph osd pool application get volumes
ceph auth get-or-create client.volumes mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images' mgr 'profile rbd pool=volumes, profile rbd pool=vms' -o /etc/ceph/ceph.client.volumes.keyring
scp /etc/ceph/ceph.client.volumes.keyring compute01:/etc/ceph/
scp /etc/ceph/ceph.client.volumes.keyring compute02:/etc/ceph/
cd /etc/ceph/
ceph auth get-key client.volumes |ssh controller tee /etc/ceph/client.volumes.key
ceph auth get-key client.volumes |ssh compute01 tee /etc/ceph/client.volumes.key
ceph auth get-key client.volumes |ssh compute02 tee /etc/ceph/client.volumes.key
chgrp cinder /etc/ceph/ceph.client.volumes.keyring
chmod 0640 /etc/ceph/ceph.client.volumes.keyring
# Add the keyring section to the ceph config file on every node; skip this if it is already present
vim /etc/ceph/ceph.conf
[client.volumes]
keyring = /etc/ceph/ceph.client.volumes.keyring
scp /etc/ceph/ceph.conf controller:/etc/ceph/
scp /etc/ceph/ceph.conf compute01:/etc/ceph/
scp /etc/ceph/ceph.conf compute02:/etc/ceph/
vim /etc/ceph/cinder.uuid.txt
ae3cf04d-11ce-4983-a601-c2c5bd19bf6d
scp /etc/ceph/cinder.uuid.txt compute01:/etc/ceph/
scp /etc/ceph/cinder.uuid.txt compute02:/etc/ceph/
vim /etc/ceph/cinder.xml
<secret ephemeral='no' private='no'>
<uuid>ae3cf04d-11ce-4983-a601-c2c5bd19bf6d</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
scp /etc/ceph/cinder.xml compute01:/etc/ceph/
scp /etc/ceph/cinder.xml compute02:/etc/ceph/
# Run on each compute node so libvirt can attach ceph-backed volumes
virsh secret-define --file /etc/ceph/cinder.xml
virsh secret-set-value --secret ae3cf04d-11ce-4983-a601-c2c5bd19bf6d --base64 $(cat /etc/ceph/client.volumes.key)
vim /etc/cinder/cinder.conf
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = volumes
rbd_secret_uuid = ae3cf04d-11ce-4983-a601-c2c5bd19bf6d
systemctl restart openstack-cinder-volume.service
tail -f /var/log/cinder/volume.log
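Once cinder-volume shows up, an end-to-end check can be done; the volume name test-vol is an arbitrary example:
openstack volume create --size 1 --type ceph test-vol
openstack volume list # status should become "available"
rbd ls volumes # a volume-<uuid> object should appear in the pool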
ceph osd pool application enable backups rbd
ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups' -o /etc/ceph/ceph.client.cinder-backup.keyring
scp /etc/ceph/ceph.client.cinder-backup.keyring compute01:/etc/ceph/
scp /etc/ceph/ceph.client.cinder-backup.keyring compute02:/etc/ceph/
cd /etc/ceph/
ceph auth get-key client.cinder-backup |ssh controller tee /etc/ceph/client.cinder-backup.key
ceph auth get-key client.cinder-backup |ssh compute01 tee /etc/ceph/client.cinder-backup.key
ceph auth get-key client.cinder-backup |ssh compute02 tee /etc/ceph/client.cinder-backup.key
chgrp cinder /etc/ceph/ceph.client.cinder-backup.keyring
chmod 0640 /etc/ceph/ceph.client.cinder-backup.keyring
vim /etc/ceph/ceph.conf
[client.cinder-backup]
keyring = /etc/ceph/ceph.client.cinder-backup.keyring
scp /etc/ceph/ceph.conf controller:/etc/ceph/
scp /etc/ceph/ceph.conf compute01:/etc/ceph/
scp /etc/ceph/ceph.conf compute02:/etc/ceph/
1. Generate a uuid
uuidgen
88e687c2-7847-464b-9703-dc19062480db
2. Create the file
vim /etc/ceph/cinder-backup.uuid.txt
88e687c2-7847-464b-9703-dc19062480db
3. Distribute it
scp /etc/ceph/cinder-backup.uuid.txt compute01:/etc/ceph/
scp /etc/ceph/cinder-backup.uuid.txt compute02:/etc/ceph/
vim /etc/ceph/cinder-backup.xml
<secret ephemeral='no' private='no'>
<uuid>88e687c2-7847-464b-9703-dc19062480db</uuid>
<usage type='ceph'>
<name>cinder-backup secret</name>
</usage>
</secret>
scp /etc/ceph/cinder-backup.xml compute01:/etc/ceph/
scp /etc/ceph/cinder-backup.xml compute02:/etc/ceph/
virsh secret-define --file /etc/ceph/cinder-backup.xml
virsh secret-set-value --secret 88e687c2-7847-464b-9703-dc19062480db --base64 $(cat /etc/ceph/client.cinder-backup.key)
vim /etc/cinder/cinder.conf
[DEFAULT]
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 4194304
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
rbd_user = cinder-backup
rbd_secret_uuid = 88e687c2-7847-464b-9703-dc19062480db
systemctl restart openstack-cinder-backup.service
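A quick round-trip verifies the backup driver; this assumes the test-vol volume from the earlier check still exists:
openstack volume backup create --name test-bak test-vol
openstack volume backup list # wait for status "available"
rbd ls backups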
ceph osd pool application enable vms rbd
ceph osd pool application get vms
ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' -o /etc/ceph/ceph.client.nova.keyring
cd /ceph-deploy
scp /etc/ceph/ceph.client.nova.keyring compute01:/etc/ceph/
scp /etc/ceph/ceph.client.nova.keyring compute02:/etc/ceph/
ceph auth get-key client.nova |ssh controller tee /etc/ceph/client.nova.key
ceph auth get-key client.nova |ssh compute01 tee /etc/ceph/client.nova.key
ceph auth get-key client.nova |ssh compute02 tee /etc/ceph/client.nova.key
chgrp nova /etc/ceph/ceph.client.nova.keyring
chmod 0640 /etc/ceph/ceph.client.nova.keyring
vim /etc/ceph/ceph.conf
[client.nova]
keyring = /etc/ceph/ceph.client.nova.keyring
scp /etc/ceph/ceph.conf compute01:/etc/ceph/
scp /etc/ceph/ceph.conf compute02:/etc/ceph/
1. Generate a uuid
uuidgen
22809d1d-e5e7-4256-b615-4510f221ddba
2. Create the file
vim /etc/ceph/nova.uuid.txt
22809d1d-e5e7-4256-b615-4510f221ddba
scp /etc/ceph/nova.uuid.txt compute01:/etc/ceph/
scp /etc/ceph/nova.uuid.txt compute02:/etc/ceph/
vim /etc/ceph/nova.xml
<secret ephemeral='no' private='no'>
<uuid>22809d1d-e5e7-4256-b615-4510f221ddba</uuid>
<usage type='ceph'>
<name>client.nova secret</name>
</usage>
</secret>
scp /etc/ceph/nova.xml compute01:/etc/ceph/
scp /etc/ceph/nova.xml compute02:/etc/ceph/
# Run on each compute node so nova can boot instances from the vms pool
virsh secret-define --file /etc/ceph/nova.xml
virsh secret-set-value --secret 22809d1d-e5e7-4256-b615-4510f221ddba --base64 $(cat /etc/ceph/client.nova.key)
vim /etc/nova/nova.conf
[DEFAULT]
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
[libvirt]
virt_type = kvm
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes="network=writeback"
rbd_user = nova
rbd_secret_uuid = 22809d1d-e5e7-4256-b615-4510f221ddba
systemctl restart openstack-nova-compute
rbd -p vms ls
# compare with what the dashboard shows
mysql -u root -p
CREATE DATABASE heat;
GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
1. Create the heat user
openstack user create --domain default --password hf3366++ heat
2. Add the admin role to the heat user
openstack role add --project service --user heat admin
3. Create the heat and heat-cfn service entities
openstack service create --name heat --description "Orchestration" orchestration
openstack service create --name heat-cfn --description "Orchestration" cloudformation
4. Create the Orchestration service API endpoints
openstack endpoint create --region RegionOne orchestration public http://controller:8004/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne orchestration internal http://controller:8004/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne orchestration admin http://controller:8004/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne cloudformation public http://controller:8000/v1
openstack endpoint create --region RegionOne cloudformation internal http://controller:8000/v1
openstack endpoint create --region RegionOne cloudformation admin http://controller:8000/v1
# Orchestration needs additional information from the Identity service to manage stacks.
1. Create the heat domain that contains stack projects and users
openstack domain create --description "Stack projects and users" heat
2. Create the heat_domain_admin user to manage projects and users in the heat domain, and set its password
openstack user create --domain heat --password hf3366++ heat_domain_admin
3. Add the admin role to the heat_domain_admin user in the heat domain to grant it stack-management rights
openstack role add --domain heat --user-domain heat --user heat_domain_admin admin
4. Create a regular project demo and a regular user demo in the heat domain
openstack project create --domain heat --description "Demo Project" demo
openstack user create --domain heat --password hf3366++ demo
5. Create the heat_stack_owner role
openstack role create heat_stack_owner
6. Add the heat_stack_owner role to the demo project and user so the demo user can manage stacks
openstack role add --project demo --user demo heat_stack_owner
7. Create the heat_stack_user role
openstack role create heat_stack_user
1. Install the packages
yum -y install openstack-heat-api openstack-heat-api-cfn openstack-heat-engine
2. Edit "/etc/rabbitmq/rabbitmq.config"
vim /etc/rabbitmq/rabbitmq.config
{delegate_count, 96}
3. Edit "/etc/heat/heat.conf" and complete the following configuration:
cp /etc/heat/heat.conf{,.bak}
vim /etc/heat/heat.conf
[DEFAULT]
transport_url = rabbit://openstack:hf3366++@controller
heat_metadata_server_url = http://controller:8000
heat_waitcondition_server_url = http://controller:8000/v1/waitcondition
stack_domain_admin = heat_domain_admin
stack_domain_admin_password = hf3366++
stack_user_domain_name = heat
num_engine_workers = 4
[heat_api]
workers = 4
[database]
connection = mysql+pymysql://heat:hf3366++@controller/heat
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = heat
password = hf3366++
[trustee]
auth_type = password
auth_url = http://controller:5000
username = heat
password = hf3366++
user_domain_name = default
[clients_keystone]
auth_uri = http://controller:5000
su -s /bin/sh -c "heat-manage db_sync" heat
systemctl enable openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service
systemctl start openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service
openstack orchestration service list
yum install -y openstack-heat-ui
systemctl restart openstack-heat*
systemctl restart httpd
# Heat panels now appear in the dashboard
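As a final smoke test, a minimal stack can be created; the template below is a hypothetical example that only defines a Neutron network, so it needs no image or flavor:
cat > demo-stack.yaml <<EOF
heat_template_version: 2015-10-15
description: Minimal Heat smoke test
resources:
  test_net:
    type: OS::Neutron::Net
    properties:
      name: heat-test-net
EOF
openstack stack create -t demo-stack.yaml demo-stack
openstack stack list # wait for CREATE_COMPLETE
openstack stack delete --yes demo-stack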