
GPDB Installation

GPDB

Preparation

templatePath: E:\vm\linux\template79
displayName: mdw
hostname: mdw.sky.local
path: E:\vm\linux\gpdb
description: mdw
ip: 192.168.181.231
numvcpus: 2
coresPerSocket: 2
memsize: 4096
---
templatePath: E:\vm\linux\template79
displayName: smdw
hostname: smdw.sky.local
path: F:\vm\linux\gpdb
description: smdw
ip: 192.168.181.232
numvcpus: 2
coresPerSocket: 2
memsize: 4096
---
templatePath: E:\vm\linux\template79
displayName: sdw1
hostname: sdw1.sky.local
path: E:\vm\linux\gpdb
description: sdw1
ip: 192.168.181.233
numvcpus: 2
coresPerSocket: 2
memsize: 4096
---
templatePath: E:\vm\linux\template79
displayName: sdw2
hostname: sdw2.sky.local
path: F:\vm\linux\gpdb
description: sdw2
ip: 192.168.181.234
numvcpus: 2
coresPerSocket: 2
memsize: 4096
---
templatePath: E:\vm\linux\template79
displayName: sdw3
hostname: sdw3.sky.local
path: E:\vm\linux\gpdb
description: sdw3
ip: 192.168.181.235
numvcpus: 2
coresPerSocket: 2
memsize: 4096
---
templatePath: E:\vm\linux\template79
displayName: sdw4
hostname: sdw4.sky.local
path: F:\vm\linux\gpdb
description: sdw4
ip: 192.168.181.236
numvcpus: 2
coresPerSocket: 2
memsize: 4096
java -jar E:\vm\CopyVMWare-1.1.0.jar `
 --force `
 --yaml E:\vm\conf\gpdb_hms.yaml
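Once the clones are up, a quick reachability check saves time later. A minimal sketch, run from any Linux host on the 192.168.181.0/24 network (the IPs are the ones defined in the YAML above):

for i in {1..6} ; do ping -c 1 -W 1 192.168.181.23${i} > /dev/null && echo "192.168.181.23${i} up" || echo "192.168.181.23${i} DOWN" ; done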

Configuring Your Systems

cat >> /etc/hosts <<EOF

# GPDB HMS
192.168.181.231    mdw.sky.local    mdw
192.168.181.232   smdw.sky.local    smdw
192.168.181.233   sdw1.sky.local    sdw1
192.168.181.234   sdw2.sky.local    sdw2
192.168.181.235   sdw3.sky.local    sdw3
192.168.181.236   sdw4.sky.local    sdw4
EOF
cat >> /etc/bashrc <<EOF

export JAVA_HOME=/usr/lib/jvm/java
export PATH=\${JAVA_HOME}/bin:\${PATH}
EOF

. /etc/bashrc

# yum install -y sshpass
# ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
# export SSHPASS="PASSWORD"

# for i in {1..1} ; do sshpass -e ssh -o StrictHostKeyChecking=no root@192.168.181.23${i} "mkdir -p ~/.ssh ; chmod 700 ~/.ssh ; touch ~/.ssh/authorized_keys ; echo '$(cat ~/.ssh/id_rsa.pub)' >> ~/.ssh/authorized_keys ; chmod 600 ~/.ssh/authorized_keys" ; done
# for i in {2..6} ; do sshpass -e ssh -o StrictHostKeyChecking=no root@192.168.181.23${i} "rm -rf ~/.ssh ; mkdir -p ~/.ssh ; chmod 700 ~/.ssh ; touch ~/.ssh/authorized_keys ; echo '$(cat ~/.ssh/id_rsa.pub)' >> ~/.ssh/authorized_keys ; chmod 600 ~/.ssh/authorized_keys ; echo SUCCESS" ; done

for i in {1..1} ; do echo     mdw.sky.local ; done | xargs -P 2 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..1} ; do echo    smdw.sky.local ; done | xargs -P 2 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..4} ; do echo sdw${i}.sky.local ; done | xargs -P 5 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..1} ; do echo     mdw ; done | xargs -P 2 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..1} ; do echo    smdw ; done | xargs -P 2 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..4} ; do echo sdw${i} ; done | xargs -P 5 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"

for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp /etc/{bashrc,hosts} {}:/etc
for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 4 -I {} ssh {} "yum install -y net-tools gcc* git vim wget zip unzip tar curl dstat ntp java-1.8.0-openjdk-devel"

Disable or Configure Firewall Software

for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 8 -I {} ssh {} "systemctl stop firewalld && systemctl disable firewalld"

Synchronizing System Clocks

for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 8 -I {} ssh {} "systemctl enable ntpd ; systemctl start ntpd ; ntpq -p"
for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 8 -I {} ssh {} "ntpq -p"

Setting Greenplum Environment Variables

cat >> ~/.bashrc << EOF

# 20220826 hskimsky for gpdb
source /usr/local/greenplum-db/greenplum_path.sh
export MASTER_DATA_DIRECTORY=/data/master/gpseg-1
export PGPORT=5432
export PGUSER=gpadmin
export PGDATABASE=gpadmin
export LD_PRELOAD=/lib64/libz.so.1
EOF
for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp ~/.bashrc {}:~
for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 8 -I {} ssh {} "mkdir -p ~/Downloads/gpdb"
cd ~/Downloads/gpdb
wget https://github.com/greenplum-db/gpdb/releases/download/6.21.1/open-source-greenplum-db-6.21.1-rhel7-x86_64.rpm
for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp ~/Downloads/gpdb/open-source-greenplum-db-6.21.1-rhel7-x86_64.rpm {}:~/Downloads/gpdb

Disable or Configure SELinux

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp /etc/selinux/config {}:/etc/selinux
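The sed above only edits the config file, which is read at boot; to also switch SELinux to permissive mode for the current boot (a sketch in the same loop style; setenforce fails harmlessly if SELinux is already disabled):

for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "setenforce 0 ; getenforce"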

Recommended OS Parameters Settings

The sysctl.conf File

cat >> /etc/sysctl.conf << EOF

# 20220826 for gpdb
# kernel.shmall = _PHYS_PAGES / 2 # See Shared Memory Pages
kernel.shmall = $(echo $(expr $(getconf _PHYS_PAGES) / 2))
# kernel.shmmax = kernel.shmall * PAGE_SIZE 
kernel.shmmax = $(echo $(expr $(getconf _PHYS_PAGES) / 2 \* $(getconf PAGE_SIZE)))
kernel.shmmni = 4096
# See Segment Host Memory
vm.overcommit_memory = 2
vm.overcommit_ratio = 95

# See Port Settings
net.ipv4.ip_local_port_range = 10000 65535
kernel.sem = 250 2048000 200 8192
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.swappiness = 10
vm.zone_reclaim_mode = 0
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
# memory 64GB or more
# vm.dirty_background_ratio = 0
# vm.dirty_ratio = 0
# vm.dirty_background_bytes = 1610612736
# vm.dirty_bytes = 4294967296
# memory less than 64GB
vm.dirty_background_ratio = 3
vm.dirty_ratio = 10

$(awk 'BEGIN {OFMT = "%.0f";} /MemTotal/ {print "vm.min_free_kbytes =", $2 * .03;}' /proc/meminfo)
EOF
for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp /etc/sysctl.conf {}:/etc
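sysctl.conf is only read at boot, so load the new values on every host right away (a sketch):

for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "sysctl -p"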

System Resources Limits

cat >> /etc/security/limits.conf << EOF

# 20220827 hskimsky for gpdb
* soft nofile 524288
* hard nofile 524288
* soft nproc 131072
* hard nproc 131072
EOF
for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp /etc/security/limits.conf {}:/etc/security
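limits.conf only affects new login sessions; each SSH session below is fresh, so it should already report the new values (a quick sanity check):

for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "ulimit -n ; ulimit -u"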
vim /etc/default/grub

...
GRUB_CMDLINE_LINUX="... transparent_hugepage=never"
...

for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp /etc/default/grub {}:/etc/default
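Copying /etc/default/grub does not change the kernel command line by itself: grub.cfg must be regenerated and the hosts rebooted before transparent_hugepage=never takes effect. A sketch for BIOS-booted CentOS 7 (the grub.cfg path differs on EFI systems):

for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "grub2-mkconfig -o /boot/grub2/grub.cfg"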

Creating the Greenplum Administrative User

for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "groupadd gpadmin"
for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "useradd gpadmin -r -m -g gpadmin"
for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "echo 'changeme' | passwd gpadmin --stdin"
cat >> /etc/sudoers << EOF

# 20220827 for gpdb
gpadmin ALL=(ALL) NOPASSWD: ALL
EOF
for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp /etc/sudoers {}:/etc

Installing the Greenplum Database Software

for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "yum install -y ~/Downloads/gpdb/open-source-greenplum-db-6.21.1-rhel7-x86_64.rpm"
for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "chown -R gpadmin:gpadmin /usr/local/greenplum*"
for i in {1..6} ; do echo 192.168.181.23${i} ; done | xargs -P 6 -I {} ssh {} "chgrp -R gpadmin /usr/local/greenplum*"

Enabling Passwordless SSH

su - gpadmin
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
export SSHPASS="changeme"
for i in {1..1} ; do sshpass -e ssh -o StrictHostKeyChecking=no 192.168.181.23${i} "mkdir -p ~/.ssh ; chmod 700 ~/.ssh ; touch ~/.ssh/authorized_keys ; echo '$(cat ~/.ssh/id_rsa.pub)' >> ~/.ssh/authorized_keys ; chmod 600 ~/.ssh/authorized_keys" ; done
for i in {2..6} ; do sshpass -e ssh -o StrictHostKeyChecking=no 192.168.181.23${i} "rm -rf ~/.ssh ; mkdir -p ~/.ssh ; chmod 700 ~/.ssh ; touch ~/.ssh/authorized_keys ; echo '$(cat ~/.ssh/id_rsa.pub)' >> ~/.ssh/authorized_keys ; chmod 600 ~/.ssh/authorized_keys ; echo SUCCESS" ; done

for i in {1..1} ; do echo     mdw.sky.local ; done | xargs -P 2 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..1} ; do echo    smdw.sky.local ; done | xargs -P 2 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..4} ; do echo sdw${i}.sky.local ; done | xargs -P 5 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..1} ; do echo     mdw ; done | xargs -P 2 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..1} ; do echo    smdw ; done | xargs -P 2 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"
for i in {1..4} ; do echo sdw${i} ; done | xargs -P 5 -I {} ssh {} -o StrictHostKeyChecking=no "hostname"

Setting Greenplum Environment Variables

cat >> ~/.bashrc << EOF

# 20220826 hskimsky for gpdb
source /usr/local/greenplum-db/greenplum_path.sh
export MASTER_DATA_DIRECTORY=/data/master/gpseg-1
export PGPORT=5432
export PGUSER=gpadmin
export PGDATABASE=gpadmin
export LD_PRELOAD=/lib64/libz.so.1
EOF
for i in {2..6} ; do echo 192.168.181.23${i} ; done | xargs -P 7 -I {} scp ~/.bashrc {}:~

Confirming Your Installation
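The check below reads host names from hostfile_exkeys, which was not created above; a minimal sketch of the file, assuming one short hostname per line:

cat > ~/hostfile_exkeys << EOF
mdw
smdw
sdw1
sdw2
sdw3
sdw4
EOF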

gpssh -f hostfile_exkeys -e 'ls -alF /usr/local/greenplum-db/greenplum_path.sh'
[gpadmin@mdw:~]$ gpssh -f hostfile_exkeys -e 'ls -alF /usr/local/greenplum-db/greenplum_path.sh'
[sdw1] ls -alF /usr/local/greenplum-db/greenplum_path.sh
[sdw1] -rw-r--r--. 1 gpadmin gpadmin 650 Aug  6 04:51 /usr/local/greenplum-db/greenplum_path.sh
[sdw3] ls -alF /usr/local/greenplum-db/greenplum_path.sh
[sdw3] -rw-r--r--. 1 gpadmin gpadmin 650 Aug  6 04:51 /usr/local/greenplum-db/greenplum_path.sh
[ mdw] ls -alF /usr/local/greenplum-db/greenplum_path.sh
[ mdw] -rw-r--r--. 1 gpadmin gpadmin 650 Aug  6 04:51 /usr/local/greenplum-db/greenplum_path.sh
[sdw2] ls -alF /usr/local/greenplum-db/greenplum_path.sh
[sdw2] -rw-r--r--. 1 gpadmin gpadmin 650 Aug  6 04:51 /usr/local/greenplum-db/greenplum_path.sh
[smdw] ls -alF /usr/local/greenplum-db/greenplum_path.sh
[smdw] -rw-r--r--. 1 gpadmin gpadmin 650 Aug  6 04:51 /usr/local/greenplum-db/greenplum_path.sh
[sdw4] ls -alF /usr/local/greenplum-db/greenplum_path.sh
[sdw4] -rw-r--r--. 1 gpadmin gpadmin 650 Aug  6 04:51 /usr/local/greenplum-db/greenplum_path.sh
[gpadmin@mdw:~]$

Creating the Data Storage Areas

Creating Data Storage Areas on the Master and Standby Master Hosts

To create the data directory location on the master

mkdir -p /data/master
chown gpadmin:gpadmin /data/master
source /usr/local/greenplum-db/greenplum_path.sh 
gpssh -h smdw -e 'mkdir -p /data/master'
gpssh -h smdw -e 'chown gpadmin:gpadmin /data/master'
[root@mdw:~]# mkdir -p /data/master
[root@mdw:~]# chown gpadmin:gpadmin /data/master
[root@mdw:~]# source /usr/local/greenplum-db/greenplum_path.sh
[root@mdw:~]# gpssh -h smdw -e 'mkdir -p /data/master'
[smdw] mkdir -p /data/master
[root@mdw:~]# gpssh -h smdw -e 'chown gpadmin:gpadmin /data/master'
[smdw] chown gpadmin:gpadmin /data/master
[root@mdw:~]#

Creating Data Storage Areas on Segment Hosts

To create the data directory locations on all segment hosts

cat >> hostfile_gpssh_segonly << EOF
sdw1
sdw2
sdw3
sdw4
EOF
source /usr/local/greenplum-db/greenplum_path.sh 
gpssh -f hostfile_gpssh_segonly -e 'mkdir -p /data/primary'
gpssh -f hostfile_gpssh_segonly -e 'mkdir -p /data/mirror'
gpssh -f hostfile_gpssh_segonly -e 'chown -R gpadmin /data/*'
[root@mdw:~]# cat >> hostfile_gpssh_segonly << EOF
> sdw1
> sdw2
> sdw3
> sdw4
> EOF
[root@mdw:~]# source /usr/local/greenplum-db/greenplum_path.sh
[root@mdw:~]# gpssh -f hostfile_gpssh_segonly -e 'mkdir -p /data/primary'
[sdw2] mkdir -p /data/primary
[sdw1] mkdir -p /data/primary
[sdw3] mkdir -p /data/primary
[sdw4] mkdir -p /data/primary
[root@mdw:~]# gpssh -f hostfile_gpssh_segonly -e 'mkdir -p /data/mirror'
[sdw1] mkdir -p /data/mirror
[sdw4] mkdir -p /data/mirror
[sdw2] mkdir -p /data/mirror
[sdw3] mkdir -p /data/mirror
[root@mdw:~]# gpssh -f hostfile_gpssh_segonly -e 'chown -R gpadmin /data/*'
[sdw4] chown -R gpadmin /data/*
[sdw2] chown -R gpadmin /data/*
[sdw1] chown -R gpadmin /data/*
[sdw3] chown -R gpadmin /data/*
[root@mdw:~]#

Initializing a Greenplum Database System

Initializing Greenplum Database

Creating the Initialization Host File

  • The following example assumes three unbonded NICs per segment node
  • NIC bonding is recommended to build a load-balanced or fault-tolerant network
ssh gpadmin@mdw
cd ~
mkdir ~/gpconfigs
cd ~/gpconfigs
cat > hostfile_gpinitsystem << EOF
sdw1
sdw2
sdw3
sdw4
EOF

Creating the Greenplum Database Configuration File

# cp $GPHOME/docs/cli_help/gpconfigs/gpinitsystem_config ~/gpconfigs/gpinitsystem_config
cat > ~/gpconfigs/gpinitsystem_config << EOF
ARRAY_NAME="Greenplum Data Platform"
SEG_PREFIX=gpseg
PORT_BASE=6000
declare -a DATA_DIRECTORY=(/data/primary /data/primary)
MASTER_HOSTNAME=mdw.sky.local
MASTER_DIRECTORY=/data/master
MASTER_PORT=5432
TRUSTED_SHELL=ssh
CHECK_POINT_SEGMENTS=8
ENCODING=UNICODE
MIRROR_PORT_BASE=7000
declare -a MIRROR_DATA_DIRECTORY=(/data/mirror /data/mirror)
#DATABASE_NAME=name_of_database
#MACHINE_LIST_FILE=/home/gpadmin/gpconfigs/hostfile_gpinitsystem
EOF

Running the Initialization Utility

To run the initialization utility

cd ~
# gpinitsystem -c gpconfigs/gpinitsystem_config -h gpconfigs/hostfile_gpinitsystem
gpinitsystem -c gpconfigs/gpinitsystem_config -h gpconfigs/hostfile_gpinitsystem -s smdw --mirror-mode=spread
[gpadmin@mdw:~/gpconfigs]$ cd ~
[gpadmin@mdw:~]$ gpinitsystem -c gpconfigs/gpinitsystem_config -h gpconfigs/hostfile_gpinitsystem -s smdw --mirror-mode=spread
20220828:18:44:11:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Checking configuration parameters, please wait...
20220828:18:44:11:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Reading Greenplum configuration file gpconfigs/gpinitsystem_config
20220828:18:44:11:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Locale has not been set in gpconfigs/gpinitsystem_config, will set to default value
20220828:18:44:11:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Locale set to en_US.utf8
20220828:18:44:11:020997 gpinitsystem:mdw:gpadmin-[INFO]:-No DATABASE_NAME set, will exit following template1 updates
20220828:18:44:11:020997 gpinitsystem:mdw:gpadmin-[INFO]:-MASTER_MAX_CONNECT not set, will set to default value 250
20220828:18:44:12:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Checking configuration parameters, Completed
20220828:18:44:12:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Commencing multi-home checks, please wait...
....
20220828:18:44:13:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Configuring build for standard array
20220828:18:44:13:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Sufficient hosts for spread mirroring request
20220828:18:44:13:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Commencing multi-home checks, Completed
20220828:18:44:13:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Building primary segment instance array, please wait...
........
20220828:18:44:18:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Building spread mirror array type , please wait...
........
20220828:18:44:22:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Checking Master host
20220828:18:44:23:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Checking new segment hosts, please wait...
................
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Checking new segment hosts, Completed
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Greenplum Database Creation Parameters
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:---------------------------------------
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master Configuration
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:---------------------------------------
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master instance name       = Greenplum Data Platform
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master hostname            = mdw.sky.local
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master port                = 5432
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master instance dir        = /data/master/gpseg-1
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master LOCALE              = en_US.utf8
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Greenplum segment prefix   = gpseg
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master Database            =
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master connections         = 250
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master buffers             = 128000kB
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Segment connections        = 750
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Segment buffers            = 128000kB
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Checkpoint segments        = 12
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Encoding                   = UNICODE
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Postgres param file        = Off
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Initdb to be used          = /usr/local/greenplum-db-6.21.1/bin/initdb
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-GP_LIBRARY_PATH is         = /usr/local/greenplum-db-6.21.1/lib
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-HEAP_CHECKSUM is           = on
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-HBA_HOSTNAMES is           = 0
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Ulimit check               = Passed
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Array host connect type    = Single hostname per node
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master IP address [1]      = ::1
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master IP address [2]      = 192.168.181.231
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Master IP address [3]      = fe80::3f77:4886:8cc0:25ba
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Standby Master             = smdw
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Number of primary segments = 2
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Standby IP address         = ::1
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Standby IP address         = 192.168.181.232
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Standby IP address         = fe80::1958:6310:7a95:7422
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Standby IP address         = fe80::3f77:4886:8cc0:25ba
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Standby IP address         = fe80::7934:f85b:a866:6599
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Total Database segments    = 8
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Trusted shell              = ssh
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Number segment hosts       = 4
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Mirror port base           = 7000
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Number of mirror segments  = 2
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Mirroring config           = ON
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Mirroring type             = Spread
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:----------------------------------------
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Greenplum Primary Segment Configuration
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:----------------------------------------
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw1.sky.local        6000    sdw1    /data/primary/gpseg0    2
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw1.sky.local        6001    sdw1    /data/primary/gpseg1    3
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw2.sky.local        6000    sdw2    /data/primary/gpseg2    4
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw2.sky.local        6001    sdw2    /data/primary/gpseg3    5
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw3.sky.local        6000    sdw3    /data/primary/gpseg4    6
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw3.sky.local        6001    sdw3    /data/primary/gpseg5    7
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw4.sky.local        6000    sdw4    /data/primary/gpseg6    8
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw4.sky.local        6001    sdw4    /data/primary/gpseg7    9
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:---------------------------------------
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Greenplum Mirror Segment Configuration
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:---------------------------------------
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw2.sky.local        7000    sdw2    /data/mirror/gpseg0     10
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw3.sky.local        7001    sdw3    /data/mirror/gpseg1     11
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw3.sky.local        7000    sdw3    /data/mirror/gpseg2     12
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw4.sky.local        7001    sdw4    /data/mirror/gpseg3     13
20220828:18:44:43:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw4.sky.local        7000    sdw4    /data/mirror/gpseg4     14
20220828:18:44:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw1.sky.local        7001    sdw1    /data/mirror/gpseg5     15
20220828:18:44:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw1.sky.local        7000    sdw1    /data/mirror/gpseg6     16
20220828:18:44:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-sdw2.sky.local        7001    sdw2    /data/mirror/gpseg7     17

Continue with Greenplum creation Yy|Nn (default=N):
> Y
20220828:18:44:51:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Building the Master instance database, please wait...
20220828:18:44:56:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Starting the Master in admin mode
20220828:18:44:57:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Commencing parallel build of primary segment instances
20220828:18:44:57:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Spawning parallel processes    batch [1], please wait...
........
20220828:18:44:57:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Waiting for parallel processes batch [1], please wait...
......................
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:------------------------------------------------
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Parallel process exit status
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:------------------------------------------------
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Total processes marked as completed           = 8
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Total processes marked as killed              = 0
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Total processes marked as failed              = 0
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:------------------------------------------------
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Removing back out file
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:-No errors generated from parallel processes
20220828:18:45:19:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Restarting the Greenplum instance in production mode
20220828:18:45:19:028443 gpstop:mdw:gpadmin-[INFO]:-Starting gpstop with args: -a -l /home/gpadmin/gpAdminLogs -m -d /data/master/gpseg-1
20220828:18:45:19:028443 gpstop:mdw:gpadmin-[INFO]:-Gathering information and validating the environment...
20220828:18:45:19:028443 gpstop:mdw:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information
20220828:18:45:19:028443 gpstop:mdw:gpadmin-[INFO]:-Obtaining Segment details from master...
20220828:18:45:19:028443 gpstop:mdw:gpadmin-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.21.1 build commit:fff63ec5cc64f2adc033fc1203afbc5fbb9ad7d9 Open Source'
20220828:18:45:19:028443 gpstop:mdw:gpadmin-[INFO]:-Commencing Master instance shutdown with mode='smart'
20220828:18:45:19:028443 gpstop:mdw:gpadmin-[INFO]:-Master segment instance directory=/data/master/gpseg-1
20220828:18:45:19:028443 gpstop:mdw:gpadmin-[INFO]:-Stopping master segment and waiting for user connections to finish ...
server shutting down
20220828:18:45:20:028443 gpstop:mdw:gpadmin-[INFO]:-Attempting forceful termination of any leftover master process
20220828:18:45:20:028443 gpstop:mdw:gpadmin-[INFO]:-Terminating processes for segment /data/master/gpseg-1
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Starting gpstart with args: -a -l /home/gpadmin/gpAdminLogs -d /data/master/gpseg-1
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Gathering information and validating the environment...
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Greenplum Binary Version: 'postgres (Greenplum Database) 6.21.1 build commit:fff63ec5cc64f2adc033fc1203afbc5fbb9ad7d9 Open Source'
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Greenplum Catalog Version: '301908232'
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Starting Master instance in admin mode
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Obtaining Segment details from master...
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Setting new master era
20220828:18:45:20:028466 gpstart:mdw:gpadmin-[INFO]:-Master Started...
20220828:18:45:21:028466 gpstart:mdw:gpadmin-[INFO]:-Shutting down master
20220828:18:45:21:028466 gpstart:mdw:gpadmin-[INFO]:-Commencing parallel segment instance startup, please wait...
.
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-Process results...
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-----------------------------------------------------
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-   Successful segment starts                                            = 8
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-   Failed segment starts                                                = 0
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-   Skipped segment starts (segments are marked down in configuration)   = 0
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-----------------------------------------------------
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-Successfully started 8 of 8 segment instances
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-----------------------------------------------------
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-Starting Master instance mdw.sky.local directory /data/master/gpseg-1
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-Command pg_ctl reports Master mdw.sky.local instance active
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-Connecting to dbname='template1' connect_timeout=15
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-No standby master configured.  skipping...
20220828:18:45:23:028466 gpstart:mdw:gpadmin-[INFO]:-Database successfully started
20220828:18:45:23:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Completed restart of Greenplum instance in production mode
20220828:18:45:23:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Commencing parallel build of mirror segment instances
20220828:18:45:23:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Spawning parallel processes    batch [1], please wait...
........
20220828:18:45:23:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Waiting for parallel processes batch [1], please wait...
...........
20220828:18:45:34:020997 gpinitsystem:mdw:gpadmin-[INFO]:------------------------------------------------
20220828:18:45:34:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Parallel process exit status
20220828:18:45:34:020997 gpinitsystem:mdw:gpadmin-[INFO]:------------------------------------------------
20220828:18:45:34:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Total processes marked as completed           = 8
20220828:18:45:34:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Total processes marked as killed              = 0
20220828:18:45:34:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Total processes marked as failed              = 0
20220828:18:45:34:020997 gpinitsystem:mdw:gpadmin-[INFO]:------------------------------------------------
20220828:18:45:35:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Starting initialization of standby master smdw
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Validating environment and parameters for standby initialization...
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Checking for data directory /data/master/gpseg-1 on smdw
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:------------------------------------------------------
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum standby master initialization parameters
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:------------------------------------------------------
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum master hostname               = mdw.sky.local
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum master data directory         = /data/master/gpseg-1
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum master port                   = 5432
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum standby master hostname       = smdw
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum standby master port           = 5432
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum standby master data directory = /data/master/gpseg-1
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum update system catalog         = On
20220828:18:45:35:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Syncing Greenplum Database extensions to standby
20220828:18:45:36:030546 gpinitstandby:mdw:gpadmin-[INFO]:-The packages on smdw are consistent.
20220828:18:45:36:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Adding standby master to catalog...
20220828:18:45:36:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Database catalog updated successfully.
20220828:18:45:36:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Updating pg_hba.conf file...
20220828:18:45:38:030546 gpinitstandby:mdw:gpadmin-[INFO]:-pg_hba.conf files updated successfully.
20220828:18:45:39:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Starting standby master
20220828:18:45:39:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Checking if standby master is running on host: smdw  in directory: /data/master/gpseg-1
20220828:18:45:43:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Cleaning up pg_hba.conf backup files...
20220828:18:45:44:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Backup files of pg_hba.conf cleaned up successfully.
20220828:18:45:44:030546 gpinitstandby:mdw:gpadmin-[INFO]:-Successfully created standby master on smdw
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Successfully completed standby master initialization
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Scanning utility log file for any warning messages
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[WARN]:-*******************************************************
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[WARN]:-Scan of log file indicates that some warnings or errors
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[WARN]:-were generated during the array creation
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Please review contents of log file
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-/home/gpadmin/gpAdminLogs/gpinitsystem_20220828.log
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-To determine level of criticality
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-These messages could be from a previous run of the utility
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-that was called today!
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[WARN]:-*******************************************************
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Greenplum Database instance successfully created
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-------------------------------------------------------
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-To complete the environment configuration, please
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-update gpadmin .bashrc file with the following
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-1. Ensure that the greenplum_path.sh file is sourced
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-2. Add "export MASTER_DATA_DIRECTORY=/data/master/gpseg-1"
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-   to access the Greenplum scripts for this instance:
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-   or, use -d /data/master/gpseg-1 option for the Greenplum scripts
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-   Example gpstate -d /data/master/gpseg-1
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Script log file = /home/gpadmin/gpAdminLogs/gpinitsystem_20220828.log
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-To remove instance, run gpdeletesystem utility
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Standby Master smdw has been configured
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-To activate the Standby Master Segment in the event of Master
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-failure review options for gpactivatestandby
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-------------------------------------------------------
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-The Master /data/master/gpseg-1/pg_hba.conf post gpinitsystem
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-has been configured to allow all hosts within this new
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-array to intercommunicate. Any hosts external to this
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-new array must be explicitly added to this file
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-Refer to the Greenplum Admin support guide which is
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-located in the /usr/local/greenplum-db-6.21.1/docs directory
20220828:18:45:44:020997 gpinitsystem:mdw:gpadmin-[INFO]:-------------------------------------------------------
[gpadmin@mdw:~]$
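Before moving on, the state of the new array can be checked with gpstate, which is installed with Greenplum (typical invocations; output omitted):

gpstate -s   # detailed status of every primary and mirror
gpstate -f   # standby master details
gpstate -m   # mirror segment status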

Starting and Stopping Greenplum Database

Restart

gpstop -r

Applying pg_hba.conf Changes

gpstop -u

Stop

gpstop -M fast

Start

gpstart -a
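With the cluster up, a one-line connectivity check from the master; PGPORT, PGUSER and PGDATABASE come from the .bashrc configured earlier:

psql -c 'SELECT version();'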

Recovery

ssh gpadmin@smdw
cat > ${MASTER_DATA_DIRECTORY}/recovery.conf << EOF
standby_mode = 'on'
primary_conninfo = 'user=gpadmin host=mdw.sky.local port=5432 sslmode=prefer sslcompression=1 krbsrvname=postgres application_name=gp_walreceiver'
EOF
gpactivatestandby
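gpactivatestandby runs on the standby host itself; passing the data directory explicitly is the usual form (a sketch):

gpactivatestandby -d $MASTER_DATA_DIRECTORY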

reboot

for i in {6..1} ; do ssh 192.168.181.23${i} "reboot" ; done

shutdown

for i in {6..1} ; do ssh 192.168.181.23${i} "shutdown -h now" ; done


Apache HAWQ is based on PostgreSQL.

PostGIS is a PostgreSQL extension that enables GIS operations in PostgreSQL.

As I recall, it can be installed on PostgreSQL 9.2 or later.

Recently, however, someone who appears to be a Greenplum committer ported PostGIS so it can be used on Apache HAWQ.

Reportedly it was done using the Greenplum GIS extension.

Based on that work, I wrote the document below on installing PostGIS on Apache HAWQ:

https://github.com/HaNeul-Kim/apache-hawq-build/blob/master/postgis_for_hawq_build.asciidoc


After installing Apache HAWQ 2.0.0 with YARN as the resource manager, I tried to connect with pgAdmin,

but the connection failed because the pg_resqueue table does not exist,

and the HAWQ YARN application restarted.

So I decided to install Pivotal HAWQ 2.0.0 instead, and installed HDP first.

http://hskimsky.tistory.com/111

Then, when I tried to connect to HAWQ with pgAdmin, HAWQ still restarted under YARN.

I'll have to dig through more documentation, or ask a committer, to find out why this happens.

This is a document I wrote myself.

The goal is to install the HAWQ 2.0 dev version on Apache Hadoop 2.7.2 with YARN as the resource manager, and to create external tables in HAWQ that read files from HDFS and the web.

It describes the steps and commands for building and installing Apache HAWQ in great detail.

It does not cover Hadoop installation, but it does cover PXF installation.

Each chapter first gives the full execution script, then the per-step scripts, and then the resulting logs.

The document is written in AsciiDoc, and the repository also contains the AsciiDoc and PDF files.

If you find anything wrong, please email hskimsky@gmail.com.

Good luck with your build.


https://github.com/HaNeul-Kim/apache-hawq-build


CentOS 6.7
Oracle JDK 1.7.0_80
Apache Maven 3.3.9
Python 2.6.6
Apache Hadoop 2.7.2
Apache HAWQ 2.0.0.0 dev
PXF 3.0.0

Installation completed with the specs above.

I must have spent about six months of trial and error on the Apache HAWQ build alone..

The docs say to use gcc 4.7.2 or later,

but upgrading gcc to 4.7.2 on CentOS 6.7 breaks yum

(I just went with the default, 4.4.7).

When setting kernel options, compare them carefully against your own hardware; the machine may fail to boot.

The semaphore settings are mandatory (see the sketch below).
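For reference, this is the semaphore setting used in the GPDB post earlier on this blog; the exact values are an assumption to be tuned per system:

cat >> /etc/sysctl.conf << EOF
kernel.sem = 250 2048000 200 8192
EOF
sysctl -p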

PXF installation also involves a lot of small details.

PXF runs on port 51200, and the port in the external table LOCATION is also 51200.

For now this is a single-node setup; soon I plan to try an HA HAWQ configuration on about four VMs and write a manual as I go.


HAWQ is based on PostgreSQL 8.2, so the queries below, with a few exceptions, also work on PostgreSQL 8.2; they cannot be used on PostgreSQL 9.x, whose engine differs from the 8.x line.

Because these are being developed in a format close to MyBatis queries, some of the queries below cannot be executed as-is.

A variable named dbSchemaTable appears here and there; write it in database.schema.table form, e.g. gpadmin.public.table_name.

Each entry below is a description followed by the query.
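The #{...} and ${...} tokens are MyBatis-style placeholders, so substitute literal values to run a query by hand. As an illustration (assuming a database named gpadmin), the database-size query from further down becomes:

SELECT
    pg_size_pretty(pg_database_size('gpadmin'))||
        CASE WHEN pg_database_size('gpadmin') > 1023 THEN
            ' ('||pg_database_size('gpadmin')||' bytes)'
            ELSE '' END size;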


Get the pid of the currently running query

SELECT pg_backend_pid();

Check autocommit status

show autocommit;

Check the HAWQ version

SELECT
    productversion
FROM
    pg_catalog.gp_version_at_initdb;

List all databases

SELECT
    datname "databaseName"
FROM
    pg_database
ORDER BY
    datname;

List all schemas

SELECT
    schema_name "schemaName"
FROM
    information_schema.schemata
ORDER BY
    schema_name;

Show the default schema

SHOW search_path;


List all tables in a specific database schema

SELECT
    t.table_catalog
   ,t.table_schema
   ,t.table_name
   ,t.table_type
   ,t.is_insertable_into
   ,t.is_typed
   ,c.relstorage
   ,c.oid::int
   ,'TABLE' object_type
   ,'('||pg_size_pretty(pg_relation_size(c.oid))||')' table_size
   ,'('||pg_size_pretty(
        (
            SELECT
                sum(pg_relation_size(ns.nspname||'.'||pp.partitiontablename))
            FROM
                pg_class cl
                    INNER JOIN pg_namespace ns ON cl.relnamespace = ns.oid
                    INNER JOIN pg_partitions pp ON cl.relname = pp.tablename
            WHERE pp.tablename = c.relname
        )::bigint
   )||')' size
FROM
    information_schema.tables t
    INNER JOIN pg_namespace s ON t.table_schema = s.nspname
    INNER JOIN pg_class c ON t.table_name = c.relname AND s.oid = c.relnamespace
    LEFT JOIN pg_partitions p ON t.table_name = p.partitiontablename
WHERE
    p.tablename IS NULL
GROUP BY
    t.table_catalog
   ,t.table_schema
   ,t.table_name
   ,t.table_type
   ,t.is_insertable_into
   ,t.is_typed
   ,c.relstorage
   ,p.partitiontablename
   ,c.oid
   ,c.relname
HAVING
    t.table_catalog = #{databaseName} AND
    t.table_schema = #{schemaName} AND
    c.relstorage NOT IN ('v', 'x')
ORDER BY
    t.table_name;


List all views in a specific database schema

SELECT
    c.oid::int
   ,t.table_catalog
   ,t.table_schema
   ,t.table_name
   ,t.table_type
   ,t.is_insertable_into
   ,t.is_typed
   ,'VIEW' object_type
FROM
    information_schema.tables t
    INNER JOIN pg_namespace s ON t.table_schema = s.nspname
    INNER JOIN pg_class c ON t.table_name = c.relname AND s.oid = c.relnamespace
    LEFT JOIN pg_partitions p ON t.table_name = p.partitiontablename
WHERE
    t.table_catalog = #{databaseName} AND
    t.table_schema = #{schemaName} AND
    t.table_type = 'VIEW'
ORDER BY
    t.table_name;

List all external tables in a specific database schema

SELECT
    t.table_catalog
   ,t.table_schema
   ,t.table_name
   ,t.table_type
   ,t.is_insertable_into
   ,t.is_typed
   ,c.relstorage
   ,c.oid::int
   ,'EXTTABLE' object_type
FROM
    information_schema.tables t
    INNER JOIN pg_namespace s ON t.table_schema = s.nspname
    INNER JOIN pg_class c ON t.table_name = c.relname AND s.oid = c.relnamespace
GROUP BY
    t.table_catalog
   ,t.table_schema
   ,t.table_name
   ,t.table_type
   ,t.is_insertable_into
   ,t.is_typed
   ,c.relstorage
   ,c.oid
HAVING
    t.table_catalog = #{databaseName} AND
    t.table_schema = #{schemaName} AND
    c.relstorage = 'x'
ORDER BY
    t.table_name;


List all functions in a specific database schema

SELECT
    p.oid
   ,a.rolname
   ,n.nspname
   ,p.proname
   ,l.lanname
   ,p.pronargs
   ,p.prorettype::regtype
   ,oidvectortypes(p.proargtypes) proargtypes
   ,p.prosrc
   ,p.proname||'('||oidvectortypes(p.proargtypes)||')' procdesc
   ,'FUNCTION' object_type
FROM pg_proc p
    INNER JOIN pg_namespace n ON n.oid = p.pronamespace
    INNER JOIN pg_authid a ON a.oid = p.proowner
    INNER JOIN pg_language l ON l.oid = p.prolang
WHERE
    n.nspname = #{schemaName} AND
    p.prorettype != 'trigger'::regtype
ORDER BY
    p.proname;

List all columns of an object in a specific database schema

SELECT
    table_catalog
   ,table_schema
   ,table_name
   ,column_name
   ,ordinal_position
   ,column_default
   ,is_nullable
   ,data_type
   ,character_maximum_length
   ,col_description(#{dbSchemaTable}::regclass, ordinal_position) column_comment
FROM
    information_schema.columns
WHERE
    table_catalog = #{databaseName} AND
    table_schema = #{schemaName} AND
    table_name = #{tableName}
ORDER BY
    ordinal_position;

List all tablespaces

SELECT
    spcname
FROM
    pg_tablespace
ORDER BY
    spcname;


List all custom formatters

SELECT
    p.proname
FROM
    pg_proc p
    INNER JOIN pg_namespace n ON n.oid = p.pronamespace
    INNER JOIN pg_authid a ON a.oid = p.proowner
    INNER JOIN pg_language l ON l.oid = p.prolang
WHERE
    (p.prorettype = 'record'::regtype OR oidvectortypes(p.proargtypes) = 'record') AND
    l.lanname != 'internal' AND
    n.nspname = 'pg_catalog'
ORDER BY p.proname;

List all constraints on a specific table

SELECT
    conname
   ,contype
   ,array_to_string(conkey, ',') conkeys
   ,array_upper(conkey, 1) length
   ,substring(consrc from 2 for char_length(consrc) - 2) consrc
FROM
    pg_constraint
WHERE
    #{dbSchemaTable}::regclass = conrelid
ORDER BY
    length
   ,conname;

Get the partition definition of a specific table

SELECT pg_get_partition_def(#{dbSchemaTable}::regclass) partitiondef;

List the partition tables of a specific table

SELECT
    c.oid id
   ,c.oid
   ,ns.nspname
   ,c.relname
   ,c.relname table_name
   ,c.relname text
   ,c.relhassubclass
   ,not(c.relhassubclass) leaf
   ,pr.parname
   ,t.table_type
   ,'TABLE' object_type
   ,c.relstorage
FROM pg_class c
    INNER JOIN pg_partition_rule pr ON c.oid = pr.parchildrelid
    INNER JOIN pg_inherits i ON c.oid = i.inhrelid
    INNER JOIN pg_namespace ns ON c.relnamespace = ns.oid
    INNER JOIN information_schema.tables t ON (t.table_name = c.relname AND ns.oid = c.relnamespace)
WHERE
    c.relkind = 'r' AND
    i.inhparent = #{oid}::oid
ORDER BY
    c.relname;


Get information about a partition table

SELECT
    schemaname
   ,tablename
   ,partitiontablename
   ,partitionname
   ,parentpartitiontablename
   ,parentpartitionname
   ,partitiontype
   ,partitionlevel
   ,partitionrank
   ,partitionposition
   ,partitionlistvalues
   ,partitionrangestart
   ,partitionstartinclusive
   ,partitionrangeend
   ,partitionendinclusive
   ,partitioneveryclause
   ,partitionisdefault
   ,partitionboundary
   ,parenttablespace
   ,partitiontablespace
FROM
    pg_partitions
WHERE
    schemaname = #{schemaName} AND
    partitiontablename = #{tableName};

Get the distribution key

SELECT
    cl.relname
   ,array_to_string(dp.attrnums, ',') attrnums
FROM
    gp_distribution_policy dp
    INNER JOIN pg_class cl ON dp.localoid = cl.oid
    LEFT JOIN pg_namespace ns ON cl.relnamespace = ns.oid
WHERE
    ns.nspname = #{schemaName} AND
    cl.relname = #{tableName};

Terminate a running query session (kill)

SELECT pg_terminate_backend(#{pid});


List all resource queues

SELECT
    q.oid::int as queueid,
    q.rsqname as rsqname,
    t1.value::int as rsqcountlimit,
    t2.value::int as rsqcountvalue,
    t3.value::real as rsqcostlimit,
    t4.value::real as rsqcostvalue,
    t5.value::real as rsqmemorylimit,
    t6.value::real as rsqmemoryvalue,
    t7.value::int as rsqwaiters,
    t8.value::int as rsqholders
FROM pg_resqueue q,
    pg_resqueue_status_kv() t1 (queueid oid, key text, value text),
    pg_resqueue_status_kv() t2 (queueid oid, key text, value text),
    pg_resqueue_status_kv() t3 (queueid oid, key text, value text),
    pg_resqueue_status_kv() t4 (queueid oid, key text, value text),
    pg_resqueue_status_kv() t5 (queueid oid, key text, value text),
    pg_resqueue_status_kv() t6 (queueid oid, key text, value text),
    pg_resqueue_status_kv() t7 (queueid oid, key text, value text),
    pg_resqueue_status_kv() t8 (queueid oid, key text, value text)
WHERE
    q.oid = t1.queueid
    AND t1.queueid = t2.queueid
    AND t2.queueid = t3.queueid
    AND t3.queueid = t4.queueid
    AND t4.queueid = t5.queueid
    AND t5.queueid = t6.queueid
    AND t6.queueid = t7.queueid
    AND t7.queueid = t8.queueid
    AND t1.key = 'rsqcountlimit'
    AND t2.key = 'rsqcountvalue'
    AND t3.key = 'rsqcostlimit'
    AND t4.key = 'rsqcostvalue'
    AND t5.key = 'rsqmemorylimit'
    AND t6.key = 'rsqmemoryvalue'
    AND t7.key = 'rsqwaiters'
    AND t8.key = 'rsqholders'
ORDER BY
    q.rsqname
   ,t1.value;

List all group roles

SELECT
    oid::int
   ,rolname
   ,rolsuper
   ,rolinherit
   ,rolcreaterole
   ,rolcreatedb
   ,rolcatupdate
   ,rolcanlogin
   ,rolconnlimit
   ,rolvaliduntil
   ,rolconfig
   ,rolresqueue
   ,rolcreaterextgpfd
   ,rolcreaterexthttp
   ,rolcreatewextgpfd
   ,rolcreaterexthdfs
   ,rolcreatewexthdfs
FROM
    pg_roles
WHERE
    not(rolcanlogin)
ORDER BY
    rolname;

List all login roles

SELECT
    oid::int
   ,rolname
   ,rolsuper
   ,rolinherit
   ,rolcreaterole
   ,rolcreatedb
   ,rolcatupdate
   ,rolcanlogin
   ,rolconnlimit
   ,rolvaliduntil
   ,rolconfig
   ,rolresqueue
   ,rolcreaterextgpfd
   ,rolcreaterexthttp
   ,rolcreatewextgpfd
   ,rolcreaterexthdfs
   ,rolcreatewexthdfs
FROM
    pg_roles
WHERE
    rolcanlogin
ORDER BY
    rolname;


Get a specific role

SELECT
    ro.oid::int
   ,ro.rolname
   ,ro.rolsuper
   ,ro.rolinherit
   ,ro.rolcreaterole
   ,ro.rolcreatedb
   ,ro.rolcatupdate
   ,ro.rolcanlogin
   ,ro.rolconnlimit
   ,to_char(ro.rolvaliduntil, 'YYYY-MM-DD') rolvaliduntil
   ,ro.rolconfig
   ,ro.rolresqueue
   ,rq.rsqname
   ,ro.rolcreaterextgpfd
   ,ro.rolcreaterexthttp
   ,ro.rolcreatewextgpfd
   ,ro.rolcreaterexthdfs
   ,ro.rolcreatewexthdfs
FROM
    pg_roles ro
    INNER JOIN pg_resqueue rq ON ro.rolresqueue = rq.oid
WHERE
    ro.oid = #{oid};

List active sessions

SELECT
    datid
   ,datname
   ,procpid
   ,sess_id
   ,usesysid
   ,usename _username
   ,CASE WHEN length(current_query) > 1022 THEN current_query||'(...)'
         ELSE current_query
    END current_query
   ,waiting
   ,to_char(query_start, 'YYYY-MM-DD HH24:MI:SS:MS') query_start
   ,to_char(backend_start, 'YYYY-MM-DD HH24:MI:SS:MS') backend_start
   ,client_addr
   ,client_port
   ,application_name
   ,to_char(xact_start, 'YYYY-MM-DD HH24:MI:SS:MS') xact_start
FROM
    pg_stat_activity
WHERE
    current_query not like '%<IDLE>%'
ORDER BY
    query_start
LIMIT
    ${limit}
OFFSET
    ${start};

Check lock waits (which session blocks which)

SELECT
    DISTINCT
        w.locktype
       ,w.relation::regclass AS relation
       ,w.mode
       ,w.pid AS waiting_pid
       ,w_stm.current_query waiting_query
       ,other.pid AS other_pid
       ,other_stm.current_query other_query
FROM
    pg_catalog.pg_locks AS w
        JOIN pg_catalog.pg_stat_activity AS w_stm ON w_stm.procpid = w.pid
        JOIN pg_catalog.pg_locks AS other ON
            (w.DATABASE = other.DATABASE AND
                w.relation = other.relation) OR
            w.transactionid = other.transactionid
        JOIN pg_catalog.pg_stat_activity AS other_stm ON other_stm.procpid = other.pid
WHERE
    NOT w.granted AND
    w.pid <> other.pid;


Get database size

SELECT
    pg_size_pretty(pg_database_size(#{databaseName}))||
        CASE WHEN pg_database_size(#{databaseName}) > 1023 THEN
            ' ('||
            pg_database_size(#{databaseName})||
            ' bytes)'
            ELSE '' END size;

Get schema size

SELECT
    pg_size_pretty(sum(pg_relation_size(schemaname||'.'||tablename))::bigint)||
            CASE WHEN sum(pg_relation_size(schemaname||'.'||tablename))::bigint > 1023 THEN
                ' ('||
                sum(pg_relation_size(schemaname||'.'||tablename))::bigint||
                ' bytes)'
                ELSE '' END size
FROM
    pg_tables
WHERE
    schemaname = #{schemaName};

Get table size

SELECT
    pg_size_pretty(pg_relation_size(#{dbSchemaTable}))||
        CASE WHEN pg_relation_size(#{dbSchemaTable}) > 1023 THEN
            ' ('||
            pg_relation_size(#{dbSchemaTable})||
            ' bytes)'
            ELSE '' END size;

Set the application name (run before your query and it shows up in the session list)

set application_name=#{appName};

Drop a database

END;
DROP DATABASE IF EXISTS ${dropDatabaseName};
BEGIN;

Drop a schema

DROP SCHEMA IF EXISTS ${schemaName} RESTRICT;

Drop a table

DROP TABLE IF EXISTS ${dbSchemaTable} RESTRICT;

Drop an external table

DROP EXTERNAL TABLE IF EXISTS ${dbSchemaTable} RESTRICT;

Drop a view

DROP VIEW IF EXISTS ${dbSchemaView} RESTRICT;

Drop a function

DROP FUNCTION IF EXISTS ${dbSchemaFunction} (${proargtypes}) RESTRICT;

Drop a resource queue

DROP RESOURCE QUEUE ${queueName};

Drop a role

DROP ROLE ${rolname};

https://network.pivotal.io/products/pivotal-hd

You can download it after signing up.

Pivotal HD 2.1.0 Single Node VM 2.39 GB

They say this one file is all you need.