安装ubuntu20.04.3忽略
#修改默认源为清华源
Mirror address: http://mirrors.tuna.tsinghua.edu.cn/ubuntu (不支持https)
分区去掉LVM分区
#配置root用户远程登录
vim /etc/ssh/sshd_config
重启ssh服务
sudo /etc/init.d/ssh restart
#设置网络
vim /etc/netplan/00-installer-config.yaml
dhcp4: no
dhcp6: no
addresses: [10.0.0.11/21]
gateway4: 10.0.0.254
nameservers:
  addresses: [223.6.6.6]
//bond配置
network:
version: 2
ethernets:
ens5f0:
dhcp4: no
dhcp6: no
ens5f1:
dhcp4: no
dhcp6: no
bonds:
bond0:
interfaces:
- ens5f0
- ens5f1
addresses: [10.128.20.106/22]
gateway4: 10.128.23.254
nameservers:
addresses: [223.5.5.5]
parameters:
mode: active-backup
mii-monitor-interval: 100
保存并退出
#使设置生效
netplan apply
#更改主机名
hostnamectl set-hostname controller
更改之后退出重新登录 可使用Ctrl D 退出
关闭防火墙与策略
# Ubuntu 20.04 默认没有 SELinux 和 firewalld,关闭 ufw 防火墙即可
ufw disable
# 控制节点
#配置NTP服务器
apt install -y chrony
vim /etc/chrony/chrony.conf
server ntp5.aliyun.com iburst
allow all
local stratum 10
systemctl restart chrony
chronyc sources -v
查看时间
修改时区
tzselect
硬件同步
hwclock -w
如果同步不成功可以强制指定一个软链接
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
timedatectl
ubuntu使用tzselect修改时区无效的解决办法
最近安装了ubuntu 20.04,使用tzselect修改时区不知为何总是无效,最后用了下面的命令:
运行命令dpkg-reconfigure tzdata,选择Asia–>Shanghai,确定,问题解决!
# 计算节点
apt install -y chrony
vim /etc/chrony/chrony.conf
pool controller iburst
systemctl restart chrony
chronyc sources -v
hwclock -w
timedatectl
#所有节点
#配置源
add-apt-repository cloud-archive:wallaby
# 控制节点
apt install python3-openstackclient -y
# 部署数据库(controller)
apt install mariadb-server python3-pymysql -y
vim /etc/mysql/mariadb.conf.d/99-openstack.cnf
[mysqld]
bind-address = 0.0.0.0
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
service mysql restart
mysql_secure_installation
回车
# 部署消息队列(controller)
#安装服务
apt install rabbitmq-server -y
#添加用户
rabbitmqctl add_user openstack 000000
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
#查看创建的用户
rabbitmqctl list_users
#开启web登录
rabbitmq-plugins enable rabbitmq_management
#修改配置文件使其可以用guest账号登录
vim /usr/lib/rabbitmq/lib/rabbitmq_server-3.8.2/ebin/rabbit.app
39 {loopback_users,[]},
#重启rabbitmq服务
service rabbitmq-server restart
#验证登录
IP:15672
username:guest
password:guest
# 部署缓存(controller)
apt install memcached python3-memcached -y
#修改配置文件
vim /etc/memcached.conf
把监听地址127.0.0.1 修改为0.0.0.0
#重启服务
service memcached restart
Etcd不用配置
============keystone=============
#创建数据库
mysql
#查看数据库字符集是否为utf8
show create database keystone;
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '000000';
#安装keystone服务
apt install keystone -y
#备份原配置文件
cp /etc/keystone/keystone.conf{,.bak}
grep -Ev "^$|#" /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf
#修改配置文件
vim /etc/keystone/keystone.conf
[database]
# ...
connection = mysql+pymysql://keystone:000000@controller/keystone
[token]
# ...
provider = fernet
#同步数据库
su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
keystone-manage bootstrap --bootstrap-password 000000 \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
#配置apache Http 服务
vim /etc/apache2/apache2.conf
ServerName controller
service apache2 restart
vim /etc/keystone/admin-openrc
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=000000
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
source /etc/keystone/admin-openrc
openstack domain create --description "An Example Domain" example
#如果报ValueError: Namespace Gtk not available
执行apt install python3-gi gobject-introspection gir1.2-gtk-3.0
openstack project create --domain default --description "Service Project" service
unset OS_AUTH_URL OS_PASSWORD
openstack --os-auth-url http://controller:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name admin --os-username admin token issue
输入密码:000000
vim /etc/keystone/admin-openrc
全部删除然后粘贴下面这段
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=000000
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
source /etc/keystone/admin-openrc
#获取token
openstack token issue
#查看日志
tail -f /var/log/keystone/*
==============glance=============
创库授权
创建项目、角色等
安装服务配置文件
启动服务
mysql
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '000000';
openstack user create --domain default --password 000000 glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
#安装glance
apt install glance -y
cp /etc/glance/glance-api.conf{,.bak}
grep -Ev "^$|#" /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf
vim /etc/glance/glance-api.conf
[database]
# ...
connection = mysql+pymysql://glance:000000@controller/glance
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 000000
[paste_deploy]
# ...
flavor = keystone
[glance_store]
# ...
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
#同步数据库
su -s /bin/sh -c "glance-manage db_sync" glance
#重启服务
service glance-api restart
#查看日志
tail -f /var/log/glance/glance-api.log
#下载镜像
http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
# 上传镜像
glance image-create --name "cirros" \
--file cirros-0.4.0-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--visibility=public
#查看镜像列表
glance image-list
============placement============
mysql
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY '000000';
openstack user create --domain default --password 000000 placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
#安装placement
apt install placement-api -y
cp /etc/placement/placement.conf{,.bak}
grep -Ev "^$|#" /etc/placement/placement.conf.bak > /etc/placement/placement.conf
vim /etc/placement/placement.conf
[placement_database]
# ...
connection = mysql+pymysql://placement:000000@controller/placement
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = 000000
#同步数据库
su -s /bin/sh -c "placement-manage db sync" placement
可忽略此警告
service apache2 restart
#验证
placement-status upgrade check
==============nova============
# 控制节点
mysql
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '000000';
openstack user create --domain default --password 000000 nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
#安装nova
apt install nova-api nova-conductor nova-novncproxy nova-scheduler -y
cp /etc/nova/nova.conf{,.bak}
grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf
vim /etc/nova/nova.conf
[api_database]
# ...
connection = mysql+pymysql://nova:000000@controller/nova_api
[database]
# ...
connection = mysql+pymysql://nova:000000@controller/nova
[DEFAULT]
# ...
transport_url = rabbit://openstack:000000@controller:5672/
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 000000
[DEFAULT]
# ...
my_ip = 10.0.0.10 //填写控制节点真实IP
[vnc]
enabled = true
# ...
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
# ...
api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 000000
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
service nova-api restart
service nova-scheduler restart
service nova-conductor restart
service nova-novncproxy restart
#此处可以写脚本同时重启4个服务
vim nova-restart.sh
#!/bin/bash
service nova-api restart
service nova-scheduler restart
service nova-conductor restart
service nova-novncproxy restart
使用该命令重启以上4个服务
bash nova-restart.sh
# 计算节点
apt install nova-compute -y
cp /etc/nova/nova.conf{,.bak}
grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf
vim /etc/nova/nova.conf
[DEFAULT]
# ...
transport_url = rabbit://openstack:000000@controller
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 000000
[DEFAULT]
# ...
my_ip = 10.0.0.11 //填写计算节点真实IP
[vnc]
# ...
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://10.0.0.10:6080/vnc_auto.html //此处填写控制节点真实IP
注:如果此处填写controller由于用户无法解析controller所以控制台会无法打开
[glance]
# ...
api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 000000
#把默认数据库删除
#查看是否支持虚拟化
egrep -c '(vmx|svm)' /proc/cpuinfo
如果为0 需要编辑配置文件
vim /etc/nova/nova-compute.conf
[libvirt]
# ...
virt_type = qemu
非0 无需配置
#在所有计算重启服务
service nova-compute restart
#控制节点主机发现
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
#设置自动执行
vim /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
#重启服务
service nova-api restart
service nova-scheduler restart
service nova-conductor restart
service nova-novncproxy restart
#验证在控制节点执行
openstack compute service list
=============neutron===========
# 控制节点
mysql
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '000000';
openstack user create --domain default --password 000000 neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
#安装neutron
#安装二层网络
apt install neutron-server neutron-plugin-ml2 neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent -y
cp /etc/neutron/neutron.conf{,.bak}
grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
vim /etc/neutron/neutron.conf
[database]
# ...
connection = mysql+pymysql://neutron:000000@controller/neutron
[DEFAULT]
# ...
core_plugin = ml2
service_plugins =
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 000000
[nova]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 000000
[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
grep -Ev "^$|#" /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
# ...
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
# ...
flat_networks = haide //网络名称自行设置
[securitygroup]
# ...
enable_ipset = true
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = haide:eth0 //和上面对应 绑定eth0 网卡
[vxlan]
enable_vxlan = false
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
sysctl -p
cp /etc/neutron/dhcp_agent.ini{,.bak}
grep -Ev "^$|#" /etc/neutron/dhcp_agent.ini.bak > /etc/neutron/dhcp_agent.ini
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
# ...
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
#配置源数据
cp /etc/neutron/metadata_agent.ini{,.bak}
grep -Ev "^$|#" /etc/neutron/metadata_agent.ini.bak > /etc/neutron/metadata_agent.ini
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
# ...
nova_metadata_host = controller
metadata_proxy_shared_secret = yongqiu
vim /etc/nova/nova.conf //此处前面已经重定向过所以直接编辑
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 000000
service_metadata_proxy = true
metadata_proxy_shared_secret = yongqiu
#数据库同步
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
//出现OK正常
#重启服务
service nova-api restart
service neutron-server restart
service neutron-linuxbridge-agent restart
service neutron-dhcp-agent restart
service neutron-metadata-agent restart
vim neutron-restart.sh
#!/bin/bash
service neutron-server restart
service neutron-linuxbridge-agent restart
service neutron-dhcp-agent restart
service neutron-metadata-agent restart
bash neutron-restart.sh
# 计算节点
apt install neutron-linuxbridge-agent -y
cp /etc/neutron/neutron.conf{,.bak}
grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
vim /etc/neutron/neutron.conf
//删除原有数据库
[DEFAULT]
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 000000
[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp
#配置二层网络
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = haide:eth0
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
sysctl -p
vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 000000
#重启服务
service nova-compute restart
service neutron-linuxbridge-agent restart
# 控制节点验证
openstack network agent list
=============Dashboard===========
#控制节点
apt install openstack-dashboard -y
vim /etc/openstack-dashboard/local_settings.py
OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = '*'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 3,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_NEUTRON_NETWORK = {
...
'enable_router': False,
'enable_quotas': False,
'enable_ipv6': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_fip_topology_check': False,
}
TIME_ZONE = "Asia/Shanghai"
vim /etc/apache2/conf-available/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
systemctl reload apache2.service
#上传镜像
openstack image create ubuntu18 --disk-format qcow2 --file ubuntu18.03_v1.qcow2
#查看镜像
openstack image list
==========cinder=============
# 控制节点
mysql
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '000000';
openstack user create --domain default --password 000000 cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
apt install cinder-api cinder-scheduler -y
cp /etc/cinder/cinder.conf{,.bak}
grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
vim /etc/cinder/cinder.conf
[database]
# ...
connection = mysql+pymysql://cinder:000000@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
my_ip = 10.0.0.10
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 000000
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
su -s /bin/sh -c "cinder-manage db sync" cinder
vim /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
service nova-api restart
service cinder-scheduler restart
service apache2 restart
# 计算节点
apt install lvm2 thin-provisioning-tools -y
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
cp /etc/lvm/lvm.conf{,.bak}
grep -Ev "^$|#" /etc/lvm/lvm.conf.bak > /etc/lvm/lvm.conf
devices {
...
filter = [ "a/sdb/", "r/.*/"]
apt install cinder-volume
cp /etc/cinder/cinder.conf{,.bak}
grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
vim /etc/cinder/cinder.conf
[database]
# ...
connection = mysql+pymysql://cinder:000000@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 000000
[DEFAULT]
# ...
my_ip = 10.0.0.11
enabled_backends = lvm
glance_api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
[lvm]
# ...
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
#重启服务
service tgt restart
service cinder-volume restart
===========================常见问题===================================
openstack部署关于Missing value auth-url required for auth plugin password
在控制台输入openstack相关命令时提示如下:
解决方案:
执行
source /etc/keystone/admin-openrc
,