Monday 11 June 2012

LEMP and Drupal Installation



1. Nginx Installation

# aptitude install libpcre3-dev zlib1g-dev libgeoip-dev
Install nginx from source

then compile
# ./configure --without-mail_pop3_module --without-mail_imap_module --without-mail_smtp_module --with-http_geoip_module --with-http_stub_status_module --prefix=/opt/nginx
# make
# make install

# ln -s /opt/nginx/logs/ /var/log/nginx
# ln -s /opt/nginx/conf/ /etc/nginx
# ln -s /opt/nginx/sbin/nginx /usr/local/sbin/nginx

add init script

# mkdir /etc/nginx/sites-{enabled,available}

# vim /etc/nginx/nginx.conf

user  www-data;
server_tokens off;
include sites-enabled/*;

remove server block

2. PHP Installation

# echo "deb http://packages.dotdeb.org stable all" >> /etc/apt/sources.list

Add key:
# wget http://www.dotdeb.org/dotdeb.gpg
# cat dotdeb.gpg | sudo apt-key add -
# rm dotdeb.gpg
# apt-get update
# apt-get install php5 php5-fpm php-pear php5-common php5-mcrypt php5-mysql php5-cli php5-gd

php-fpm configuration:

# edit /etc/php5/fpm/php5-fpm.conf

some directives to be tweaked
pm.max_children = 25
pm.start_servers = 4
pm.min_spare_servers = 2
pm.max_spare_servers = 10
pm.max_requests = 500
request_terminate_timeout = 30s

restart to activate new settings:

# /etc/init.d/php5-fpm restart

3. MySQL Installation

# aptitude install mysql-server
# vim /root/.my.cnf

[client]
password = Root Passwd

4. Drupal Installation

# cd /opt
# wget http://ftp.drupal.org/files/projects/drupal-7.7.tar.gz
# tar xvfz drupal-7.7.tar.gz
# mv drupal-7.7/ drupal
# chown -R www-data:www-data /opt/drupal/
# mysqladmin -u root -p create drupal
# mysql -u root
mysql> GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, INDEX, ALTER ON drupal.* TO 'drupal_admin'@'localhost' IDENTIFIED BY 'drupal_admin_password';
mysql> GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, INDEX, ALTER ON drupal.* TO 'drupal_admin'@'localhost.localdomain' IDENTIFIED BY 'drupal_admin_password';
mysql> FLUSH PRIVILEGES;
mysql> quit;

# vim /etc/nginx/sites-available/drupal.conf

server {
       listen 80;
       root /opt/drupal;

       index index.php index.html;

       location = /favicon.ico {
                log_not_found off;
                access_log off;
       }

       location = /robots.txt {
                allow all;
                log_not_found off;
                access_log off;
       }

       # Make sure files with the following extensions do not get loaded by nginx because nginx would display the source code, and these files can contain PASSWORDS!
        location ~* \.(engine|inc|info|install|make|module|profile|test|po|sh|.*sql|theme|tpl(\.php)?|xtmpl)$|^(\..*|Entries.*|Repository|Root|Tag|Template)$|\.php_ {
                deny all;
        }

       # Deny all attempts to access hidden files such as .htaccess, .htpasswd, .DS_Store (Mac).
       location ~ /\. {
                deny all;
                access_log off;
                log_not_found off;
       }

       location / {
                try_files $uri $uri/ /index.php?$args;
       }

       location ~*  \.(jpg|jpeg|png|gif|css|js|ico)$ {
                expires max;
                log_not_found off;
       }

       location ~ \.php$ {
                # Only hand existing files to PHP-FPM. Without this, with
                # cgi.fix_pathinfo=1 a request like /uploads/x.jpg/y.php
                # could get an uploaded file executed as PHP.
                try_files $uri =404;
                include /etc/nginx/fastcgi_params;
                fastcgi_pass 127.0.0.1:9000;
                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
       }
}

# cd /etc/nginx/sites-enabled/
# ln -s /etc/nginx/sites-available/drupal.conf drupal.conf
# /etc/init.d/nginx reload

Saturday 18 February 2012

Disable Currently Protected Targeted Daemon With SELinux

The default is that http is jailed under httpd_t domain

# ps -efZ | grep http

unconfined_u:system_r:httpd_t:s0 root         2913  1         0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2915  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2916  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2917  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2918  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2919  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2920  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2921  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2922  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd
unconfined_u:system_r:httpd_t:s0 apache   2923  2913  0 11:30 ?        00:00:00 /usr/sbin/httpd

To disable SELinux on httpd we will change the /usr/sbin/httpd default target

# ls -lZ /usr/sbin/httpd
-rwxr-xr-x. root root system_u:object_r:httpd_exec_t:s0 /usr/sbin/httpd

# chcon -t unconfined_exec_t /usr/sbin/httpd
# service httpd restart
# ls -lZ /usr/sbin/httpd
-rwxr-xr-x. root root system_u:object_r:unconfined_exec_t:s0 /usr/sbin/httpd
# ps -efZ | grep http

unconfined_u:unconfined_r:unconfined_t:s0 root      3077  1        0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3079 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3080 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3081 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3082 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3083 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3084 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3085 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3086 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd
unconfined_u:unconfined_r:unconfined_t:s0 apache 3087 3077  0 11:42 ?  00:00:00 /usr/sbin/httpd

To revert back to original target

# restorecon /usr/sbin/httpd

Wednesday 8 February 2012

Setting Up A High-Availability Load Balancer-With Failover and Session Support- With HAProxy-Heartbeat

This tutorial explains how to set up a two-node load balancer in an active/passive configuration with HAProxy and heartbeat. The load balancer sits between the user and two (or more) backend Apache web servers that hold the same content. Not only does the load balancer distribute the requests to the two backend Apache servers, it also checks the health of the backend servers. If one of them is down, all requests will automatically be redirected to the remaining backend server. In addition to that, the two load balancer nodes monitor each other using heartbeat, and if the master fails, the slave becomes the master, which means the users will not notice any disruption of the service. HAProxy is session-aware, which means you can use it with any web application that makes use of sessions (such as forums, shopping carts, etc.).

1. Preliminary Note

    Load Balancer 1: lb1.example.com, IP address: 192.168.0.100
    Load Balancer 2: lb2.example.com, IP address: 192.168.0.101
    Web Server 1: http1.example.com, IP address: 192.168.0.102
    Web Server 2: http2.example.com, IP address: 192.168.0.103
    We also need a virtual IP address that floats between lb1 and lb2: 192.168.0.99

2. Preparing The Backend Web Servers

We will configure HAProxy as a transparent proxy, i.e., it will pass on the original user's IP address in a field called X-Forwarded-For to the backend web servers. Of course, the backend web servers should log the original user's IP address in their access logs instead of the IP addresses of our load balancers. Therefore we must modify the LogFormat line in /etc/apache2/apache2.conf and replace %h with %{X-Forwarded-For}i:

 http1/http2

# vim /etc/apache2/apache2.conf

#LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined

Also, we will configure HAProxy to check the backend servers' health by continuously requesting the file check.txt (translates to /var/www/check.txt if /var/www is your document root) from the backend servers. Of course, these requests would totally bloat the access logs and mess up your page view statistics (if you use a tool like Webalizer or AWstats that generates statistics based on the access logs).

Therefore we open our vhost configuration (in this example it's in /etc/apache2/sites-available/default) and put these two lines into it (comment out all other CustomLog directives in your vhost configuration):

# vim /etc/apache2/sites-available/default

SetEnvIf Request_URI "^/check\.txt$" dontlog
CustomLog /var/log/apache2/access.log combined env=!dontlog

Note:- This configuration above prevents that requests to check.txt get logged in Apache's access log

Afterwards we restart Apache:

# /etc/init.d/apache2 restart

... and create the file check.txt (this can be an empty file):

# touch /var/www/check.txt

We are finished already with the backend servers; the rest of the configuration happens on the two load balancer nodes.

3. Installing HAProxy

lb1/lb2

We can install HAProxy as follows:

# aptitude install haproxy

4. Configuring The Load Balancers

We back up the original /etc/haproxy/haproxy.cfg and create a new one like this:

 lb1/lb2

# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg_orig
# echo > /etc/haproxy/haproxy.cfg
# vim /etc/haproxy/haproxy.cfg

global
        log 127.0.0.1   local0
        log 127.0.0.1   local1 notice
        #log loghost    local0 info
        maxconn 4096
        #debug
        #quiet
        user haproxy
        group haproxy

defaults
        log     global
        mode    http
        option  httplog
        option  dontlognull
        retries 3
        option redispatch
        maxconn 2000
        contimeout      5000
        clitimeout      50000
        srvtimeout      50000

listen webfarm 192.168.0.99:80
       mode http
       stats enable
       stats auth someuser:somepassword
       balance roundrobin
       cookie JSESSIONID prefix
       option httpclose
       option forwardfor
       option httpchk HEAD /check.txt HTTP/1.0
       server webA 192.168.0.102:80 cookie A check
       server webB 192.168.0.103:80 cookie B check

Afterwards, we set ENABLED to 1 in /etc/default/haproxy:

# vim /etc/default/haproxy

5. Setting Up Heartbeat


We've just configured HAProxy to listen on the virtual IP address 192.168.0.99, but someone has to tell lb1 and lb2 that they should listen on that IP address. This is done by heartbeat which we install like this:

lb1/lb2

# aptitude install heartbeat

To allow HAProxy to bind to the shared IP address, we add the following line to /etc/sysctl.conf:

# vim /etc/sysctl.conf

net.ipv4.ip_nonlocal_bind=1

... and run:

# sysctl -p

Now we have to create three configuration files for heartbeat, /etc/ha.d/authkeys, /etc/ha.d/ha.cf, and /etc/ha.d/haresources. /etc/ha.d/authkeys and /etc/ha.d/haresources must be identical on lb1 and lb2, and /etc/ha.d/ha.cf differs by just one line!

lb1/lb2

# vim /etc/ha.d/authkeys

auth 3
3 md5 somerandomstring

Note:- somerandomstring is a password which the two heartbeat daemons on lb1 and lb2 use to authenticate against each other

/etc/ha.d/authkeys should be readable by root only, therefore we do this:
# chmod 600 /etc/ha.d/authkeys

lb1

# vim /etc/ha.d/ha.cf

#
#       keepalive: how many seconds between heartbeats
#
keepalive 2
#
#       deadtime: seconds-to-declare-host-dead
#
deadtime 10
#
#       What UDP port to use for udp or ppp-udp communication?
#
udpport        694
bcast  eth0
mcast eth0 225.0.0.1 694 1 0
ucast eth0 192.168.0.101
#       What interfaces to heartbeat over?
#udp     eth0
#
#       Facility to use for syslog()/logger (alternative to log/debugfile)
#
logfacility     local0
#
#       Tell what machines are in the cluster
#       node    nodename ...    -- must match uname -n --> Very Important
node    lb1.example.com
node    lb2.example.com

Note:- on lb1 and lb2.The udpport, bcast, mcast, and ucast options specify how the two heartbeat nodes communicate with each other to find out if the other node is still alive. You can leave the udpport, bcast, and mcast lines as shown above, but in the ucast line it's important that you specify the IP address of the other heartbeat node in this case it's 192.168.0.101 (lb2.example.com).


On lb2 the file looks pretty much the same, except that the ucast line holds the IP address of lb1

lb2

# vim /etc/ha.d/ha.cf

#
#       keepalive: how many seconds between heartbeats
#
keepalive 2
#
#       deadtime: seconds-to-declare-host-dead
#
deadtime 10
#
#       What UDP port to use for udp or ppp-udp communication?
#
udpport        694
bcast  eth0
mcast eth0 225.0.0.1 694 1 0
ucast eth0 192.168.0.100
#       What interfaces to heartbeat over?
#udp     eth0
#
#       Facility to use for syslog()/logger (alternative to log/debugfile)
#
logfacility     local0
#
#       Tell what machines are in the cluster
#       node    nodename ...    -- must match uname -n --> Very Important
node    lb1.example.com
node    lb2.example.com

lb1/lb2

# vim /etc/ha.d/haresources

lb1.example.com 192.168.0.99 --> must match uname -n

lb1/lb2

# /etc/init.d/heartbeat start

Then run:

lb1

# ip addr sh eth0

... and you should find that lb1 is now listening on the shared IP address, too:

lb1:/etc/ha.d# ip addr sh eth0
2: eth0: mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:63:f7:5c brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.100/24 brd 192.168.0.255 scope global eth0
    inet 192.168.0.99/24 brd 192.168.0.255 scope global secondary eth0:0
    inet6 fe80::20c:29ff:fe63:f75c/64 scope link
       valid_lft forever preferred_lft forever
lb1:/etc/ha.d#

You can check this again by running:

# ifconfig

eth0:0    Link encap:Ethernet  HWaddr 00:0c:29:63:f7:5c
          inet addr:192.168.0.99  Bcast:192.168.0.255  Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          Interrupt:18 Base address:0x1400

As lb2 is the passive load balancer, it should not be listening on the virtual IP address as long as lb1 is up. We can check that with:

lb2

# ip addr sh eth0

The output should look like this:

lb2:~# ip addr sh eth0
2: eth0: mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:be:7b:3b brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.101/24 brd 192.168.0.255 scope global eth0
    inet6 fe80::20c:29ff:febe:7b3b/64 scope link
       valid_lft forever preferred_lft forever
lb2:~#

The output of ifconfig will not include eth0:0

6. Starting HAProxy

Note:- You have to stop Apache first, since HAProxy listens on port 80 and the haproxy service will not start while the apache service is running

Now we can start HAProxy:

lb1/lb2

# /etc/init.d/haproxy start

7. Testing

You can now make HTTP requests to the virtual IP address 192.168.0.99 (or to any domain/hostname that is pointing to the virtual IP address), and you should get content from the backend web servers.

You can test its high-availability/failover capabilities by switching off one backend web server - the load balancer should then redirect all requests to the remaining backend web server. Afterwards, switch off the active load balancer (lb1) - lb2 should take over immediately. You can check that by running:

8. HAProxy Statistics

You might have noticed that we have used the options stats enable and stats auth someuser:somepassword in the HAProxy configuration in chapter 4. This allows us to access (password-protected) HAProxy statistics under the URL http://192.168.0.99/haproxy?stats

   

Sunday 22 January 2012

Asterisk Installation and Configuration

# yum install gcc bison flex bison-devel gcc-c++ ncurses-devel libxml2-devel make
# wget http://downloads.asterisk.org/pub/telephony/asterisk/releases/asterisk-1.8.8.0.tar.gz
# tar -xvzf asterisk-1.8.8.0.tar.gz && cd asterisk-1.8.8.0
# ./configure
# make
# make install
# make samples
# echo > /etc/asterisk/sip.conf
# echo > /etc/asterisk/extensions.conf
# vim /etc/asterisk/sip.conf

[general]  
port=5060
bindaddr=0.0.0.0

[1000]
type=friend
host=dynamic
secret=1000

[1001]
type=friend
host=dynamic
secret=1001


Note:- The general section contains main configurations for the asterisk server We have 2 users named 1000 and 1001 with type=friend which mean these users can call in and out, secret parameter is used to set password for that user

# vim /etc/asterisk/extensions.conf

[default]
exten => 999,1,Answer()
exten => 999,2,Playback(hello-world)
exten => 999,3,Hangup()
exten => 1000,1,Dial(SIP/1000)
exten => 1001,1,Dial(SIP/1001)  --> exten => 1000,1,Dial(SIP/User Name) for example if you dialed 1000 it will call user name 1000

Note:- syntax in the file takes the following form exten => number that is dialed,step number,function
i.e dialing 999 follows 3 steps: first answer, second play back the recorded message "hello world" and third hang up.

Note:- These sounds and recorded messages can be found in /var/lib/asterisk/sounds/

# asterisk --> To start asterisk daemon
# asterisk -r
localhost*CLI> reload
localhost*CLI> exit

then use a proper VoIP client to connect to the server with one user and dial the other user

Thursday 19 January 2012

2 Node Cluster with MPICH2 and Benchmarking

Creating 2 Nodes Cluster Using MPICH2 on Centos 5.5 x86


We will use 3 VMs

master    192.168.1.120
node1    192.168.1.121
node2    192.168.1.122

1. Editing /etc/hosts

Master

# vim /etc/hosts
127.0.0.1       localhost.localdomain   localhost
192.168.1.120   master
192.168.1.121   node1
192.168.1.122   node2

# scp /etc/hosts node1:/etc/
# scp /etc/hosts node2:/etc/

2. Creating SSH Keys

Master

# ssh-keygen --> Don't change anything
# cat /root/.ssh/*.pub > /root/.ssh/authorized_keys
# scp -r /root/.ssh/ node1:/root/
# scp -r /root/.ssh/ node2:/root/

3. Install PDSH ( Parallel Distribution SHell ) to control more than one machine at once

Master

Install Rpmforge Repo

# rpm --import http://apt.sw.be/RPM-GPG-KEY.dag.txt --> If you got an error this mean it installed before
# rpm -Uvh http://packages.sw.be/rpmforge-release/rpmforge-release-0.5.2-2.el5.rf.i386.rpm --> Or download it with wget and install it locally
# yum install pdsh --> For distributing commands to all nodes once at the same time
# vim /etc/pdsh/machines
node1
node2
master

# pdsh -a uptime --> you should have the result of the 3 machines

Note:- If any machine didn't respond and you are sure all the above configurations are ok, try to change the order of the machines in /etc/pdsh/machines

4. Install Time Server to Disallow Time Variances Between The Nodes

Install this time server in any other machine for example the host hosting this VMs "In my case the host IP 192.168.1.2"

# yum install ntp
# vim /etc/ntp.conf

#server 0.centos.pool.ntp.org   
#server 1.centos.pool.ntp.org   -->  Comment them
#server 2.centos.pool.ntp.org   

server  127.127.1.0 # local clock
fudge   127.127.1.0 stratum 10     -->  Make sure that they are uncommented

# /etc/init.d/ntpd start
# chkconfig ntpd on

Master

# pdsh -a yum -y install ntp
# vim /etc/ntp.conf

server 192.168.1.2

#server 0.centos.pool.ntp.org        
#server 1.centos.pool.ntp.org         
#server 2.centos.pool.ntp.org          --> Comment them
#server  127.127.1.0 # local clock
#fudge   127.127.1.0 stratum 10    

# scp /etc/ntp.conf node1:/etc/
# scp /etc/ntp.conf node2:/etc/
# pdsh -a /etc/init.d/ntpd start
# pdsh -a chkconfig ntpd on

5. Sharing The /cluster Using NFS

Master

# yum install nfs-utils.i386
# vim /etc/exports

/cluster    *(rw,sync,no_root_squash)

# mkdir /cluster
# /etc/init.d/portmap start
# /etc/init.d/nfs start
# chkconfig nfs on
# chkconfig portmap on
# pdsh -w node1,node2 mkdir /cluster
# pdsh -w node1,node2 yum -y install nfs-utils
# pdsh -w node1,node2 /etc/init.d/portmap start
# pdsh -w node1,node2 mount.nfs master:/cluster /cluster
# pdsh -w node1,node2 chkconfig nfs on
# pdsh -w node1,node2 chkconfig portmap on

Node1 Node2

# vim /etc/fstab
master:/cluster    /cluster    nfs    defaults    0 0

Note:- If you reboot the VMs or Start it any other time make sure that the master VM start first

6. Creating mpiuser and his SSH keys

Master

# pdsh -a groupadd -g 1000 mpigroup
# pdsh -a useradd -u 1000 -g 1000 -d /cluster/mpiuser mpiuser
# pdsh -a yum -y install gcc gcc-c++.i386 compat-gcc-34-g77.i386
$ su - mpiuser
$ ssh-keygen --> Don't change anything
$ cat ~/.ssh/*.pub > ~/.ssh/authorized_keys


7. Installing MPICH2

Master

# yum -y install patch
# cd /cluster && wget http://www.mcs.anl.gov/research/projects/mpich2/downloads/tarballs/1.4.1p1/mpich2-1.4.1p1.tar.gz
# chown mpiuser.mpigroup -R /cluster
# su mpiuser
$ cd /cluster && tar -xvzf mpich2-1.4.1p1.tar.gz
$ cd mpich2-1.4.1p1 && ./configure --prefix=/cluster/mpich2
$ make && make install
$ vim ~/.bash_profile --> Edit it as follow

PATH=$PATH:$HOME/bin:/cluster/mpich2/bin
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/cluster/mpich2/lib

export PATH LD_LIBRARY_PATH

$ source ~/.bash_profile
$ vim /cluster/mpiuser/hosts
node1
node2

$ mpiexec -f /cluster/mpiuser/hosts hostname --> results must be as follow
node1
node2

Note:- Make sure that the fingerprints of all nodes are saved in known_hosts of mpiuser

$ mpiexec -n 1 -f  /cluster/mpiuser/hosts /cluster/mpich2-1.4.1p1/examples/cpi --> Test execution with one node
$ mpiexec -n 2 -f  /cluster/mpiuser/hosts /cluster/mpich2-1.4.1p1/examples/cpi --> Test execution with two nodes

Note :- Since we are using VMs on the same host you won't notice any changes and sometimes the indicator will increase instead of decrease, but in
        the real world you will be happy with the results.

Now Using The Cluster To Compile a File And Make Another Test

$ mpicc -o /cluster/mpich2-1.4.1p1/examples/icpi /cluster/mpich2-1.4.1p1/examples/icpi.c
$ mpiexec -f /cluster/mpiuser/hosts   -n 1 /cluster/mpich2-1.4.1p1/examples/icpi --> Add intervals say 1000000 then repeat the test with -n 2

8. Installing Benchmark Tool "Linpack"

Master

$ cd && wget http://ftp.freebsd.org/pub/FreeBSD/ports/distfiles/gotoblas/GotoBLAS2-1.13_bsd.tar.gz
$ tar -xvzf GotoBLAS2-1.13_bsd.tar.gz --> Special library for Linpack
$ cd GotoBLAS2
$ make TARGET=NEHALEM

$ cd && wget http://www.netlib.org/benchmark/hpl/hpl-2.0.tar.gz
$ tar -xvzf hpl-2.0.tar.gz && cd hpl-2.0
$ cp setup/Make.Linux_PII_FBLAS_gm .
$ vim Make.Linux_PII_FBLAS_gm --> Edit the following directives as follow
TOPdir       = $(HOME)/hpl-2.0
LAdir        = $(HOME)/GotoBLAS2
LAinc        =
LAlib        = $(LAdir)/libgoto2.a -lm -L/usr/lib/gcc/i386-redhat-linux/4.1.2 --> This is the path of gcc ver 4.1.2, make sure it does exist
CCFLAGS      = $(HPL_DEFS) -O3
LINKER       = mpicc

$ make arch=Linux_PII_FBLAS_gm
$ mkdir -p /cluster/mpiuser/hpl/
$ cp Make.Linux_PII_FBLAS_gm /cluster/mpiuser/hpl/

Note:- I made the last 2 steps to get around an error in the compilation process

9. Cluster Benchmarking

Master

$ cd /cluster/mpiuser/hpl-2.0/bin/Linux_PII_FBLAS_gm
$ cp HPL.dat HPL.dat.bak

To Determine The Size of The Problem

$ free -b --> To get the number of free blocks in RAM, in my case 181088256.Apply it in the following formula in your calculator using bc command

Note:- The free command should be executed on any node, not the master

sqrt ( .1 * 181088256 * 2 ) --> 2 is the number of nodes result is 6018.1
$ vim HPL.dat --> Edit the following
6            device out (6=stdout,7=stderr,file)
1            # of problems sizes (N)
6000      Ns
1            # of NBs
100      NBs
0            PMAP process mapping (0=Row-,1=Column-major)
1            # of process grids (P x Q)
1        Ps
2        Qs
16.0         threshold
3            # of panel fact
0 1 2        PFACTs (0=left, 1=Crout, 2=Right)
2            # of recursive stopping criterium
2 4          NBMINs (>= 1)
1            # of panels in recursion
2            NDIVs
3            # of recursive panel fact.
0 1 2        RFACTs (0=left, 1=Crout, 2=Right)
1            # of broadcast
0            BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM)
1            # of lookahead depth
0            DEPTHs (>=0)
2            SWAP (0=bin-exch,1=long,2=mix)
64           swapping threshold
0            L1 in (0=transposed,1=no-transposed) form
0            U  in (0=transposed,1=no-transposed) form
1            Equilibration (0=no,1=yes)
8            memory alignment in double (> 0)


$ mpiexec -f /cluster/mpiuser/hosts -n 2 ./xhpl --> This will run many tests to benchmark performance

Note :- Tweak the HPL.dat configuration until you get maximum utilization of CPU; for me, after tweaking the configuration I got the following results in the top command for node1 and node2

  PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+ COMMAND               
 2136 mpiuser   25   0 1564m 1.5g 1188 R 100.2 76.6   1:00.74 xhpl

                

Saturday 14 January 2012

VNC Server

Server Side

# yum install tigervnc-server
# vncpasswd --> This is done by the user you want to access with
# vim /etc/sysconfig/vncservers --> Add the following line

VNCSERVERS="1:root"

Note:- If you want to add multiple users you can do it as follows
VNCSERVERS="1:user1 2:user2 3:user3"
Note that /etc/sysconfig/vncservers is sourced as a shell script, so all users must go on a single VNCSERVERS line; repeating the variable on separate lines would make each assignment override the previous one.

# /etc/init.d/vncserver start

Client Side

# yum install vncviewer
# vncviewer VNCserverIP:1 --> For user1
OR
# vncviewer VNCserverIP:2 --> For user2 ... etc

For Further Options of vncviewer Command You Can Consult Man Pages

# man vncviewer

Thursday 12 January 2012

Nessus Installation

 Install Nessus from www.nessus.org
# rpm -Uvh Nessus.x.x.rpm --> install the package
# /opt/nessus//sbin/nessus-adduser --> to add a user

Note:- Follow the instructions to add the new user. In rules section consult man pages for nessus-useradd
i.e
accept 192.168.1/16
accept 10.10.5.0/8
deny 192.168.0.0/24

# Register at www.nessus.org and a registration email will be sent to you telling you

Linux and Solaris Users :

To activate your account, simply execute the following command :

# /opt/nessus/bin/nessus-fetch --register AAAA-AAAA-AAAA-AAAA-AAAA


Note:- If auto_update is set to 'yes' in nessusd.conf, Nessus will update the plugins by itself otherwise use this command




# /opt/nessus/sbin/nessus-update-plugins

Note:- Nessus update its plugins after registration every 12 hours

# service nessusd start --> nessusd will not start until you fetch and register, since only then are the plugins installed

# Nessus provide web-interface to manage it http://localhost:8834

Scheduling Plugins Updates with Cron

# crontab -e and add 28 3 * * * /opt/nessus/sbin/nessus-update-plugins

Tuesday 10 January 2012

qmail installation from Source Code in Centos 5.5

# mkdir /root/qmail
# cd /root/qmail
# wget http://www.qmail.org/netqmail-1.06.tar.gz
# wget http://cr.yp.to/ucspi-tcp/ucspi-tcp-0.88.tar.gz --> inetd/xinetd substitute manages various daemon that qmail shipped with.
# wget http://cr.yp.to/daemontools/daemontools-0.76.tar.gz --> Provide some services such as management as well as logging though 2 interfaces service scan and supervise (svscan/supervise).

# tar -xvzf netqmail-1.06.tar.gz
# tar -xvzf ucspi-tcp-0.88.tar.gz
# mkdir /package
# mv daemontools-0.76.tar.gz /package
# mkdir /var/qmail
# cd /root/qmail/netqmail-1.06
# cp INSTALL.ids IDS --> This file used in compilation to create users and groups for qmail

 Note:- IDS file should include only the following
   groupadd nofiles                    
   useradd -g nofiles -d /var/qmail/alias alias  
   useradd -g nofiles -d /var/qmail qmaild        
   useradd -g nofiles -d /var/qmail qmaill       
   useradd -g nofiles -d /var/qmail qmailp       
   groupadd qmail                    
   useradd -g qmail -d /var/qmail qmailq        
   useradd -g qmail -d /var/qmail qmailr        
   useradd -g qmail -d /var/qmail qmails        


# . IDS && echo $? --> To source the file in order to execute it and echo the exit status
# yum install gcc gcc-c++ make flex bison bison-devel
# make setup check --> To build source files
# ./config-fast mail.linux.org (FQDN) --> If the FQDN is not defined you can check it using hostname -f
# cd /root/qmail/ucspi-tcp-0.88
# yum install patch-2.5.4-31.el5 --> Make sure that the patch command is installed
# patch < /root/qmail/netqmail-1.06/other-patches/ucspi-tcp-0.88.errno.patch --> this will pactch ucspi patch to the environment
# make && make setup check
# cd /package
# tar -xvzf daemontools-0.76.tar.gz
# cd /package/admin/daemontools-0.76/src
# patch < /root/qmail/netqmail-1.06/other-patches/daemontools-0.76.errno.patch
# cd ..
# package/install
# vim /var/qmail/rc --> And add the following

#!/bin/sh

# qmail startup script, executed by supervise via qmail-send/run.
# Logging goes to stdout (captured by multilog through the log/run script);
# control/defaultdelivery holds the qmail-local default delivery instruction.
# $( ) is used instead of legacy backticks for the command substitution.
exec env - PATH="/var/qmail/bin:$PATH" \
qmail-start "$(cat /var/qmail/control/defaultdelivery)"

# chmod 755 /var/qmail/rc
# mkdir /var/log/qmail
# echo "./Mailbox" > /var/qmail/control/defaultdelivery

The qmailctl script

# wget http://lifewithqmail.org/qmailctl-script-dt70
# mv qmailctl-script-dt70 /var/qmail/bin/qmailctl
# chmod 755 /var/qmail/bin/qmailctl
# ln -s /var/qmail/bin/qmailctl /usr/bin

The supervise scripts

# mkdir -p /var/qmail/supervise/qmail-send/log
# mkdir -p /var/qmail/supervise/qmail-smtpd/log
# vim /var/qmail/supervise/qmail-send/run --> Add the following

#!/bin/sh
# supervise "run" script for qmail-send: hand control to the main
# qmail startup script (/var/qmail/rc) so supervise can manage the daemon.
exec /var/qmail/rc

# vim /var/qmail/supervise/qmail-send/log/run --> Add the following

#!/bin/sh
# supervise log script for qmail-send: run multilog as the qmaill user,
# timestamping each line (t) and writing the rotated log to /var/log/qmail.
exec /usr/local/bin/setuidgid qmaill /usr/local/bin/multilog t /var/log/qmail

# vim /var/qmail/supervise/qmail-smtpd/run --> Add the following

#!/bin/sh

# supervise "run" script for the SMTP listener: reads the qmail control
# files, sanity-checks them, then execs tcpserver running qmail-smtpd as
# the qmaild user under a memory softlimit.

QMAILDUID=$(id -u qmaild)
NOFILESGID=$(id -g qmaild)
MAXSMTPD=$(cat /var/qmail/control/concurrencyincoming)
LOCAL=$(head -1 /var/qmail/control/me)

# Refuse to start if any required value is missing. Separate [ ] tests
# joined with || replace the obsolescent and ambiguous test -o operator.
if [ -z "$QMAILDUID" ] || [ -z "$NOFILESGID" ] || [ -z "$MAXSMTPD" ] || [ -z "$LOCAL" ]; then
    echo QMAILDUID, NOFILESGID, MAXSMTPD, or LOCAL is unset in
    echo /var/qmail/supervise/qmail-smtpd/run
    exit 1
fi

# Without rcpthosts, qmail-smtpd would accept mail for any destination.
if [ ! -f /var/qmail/control/rcpthosts ]; then
    echo "No /var/qmail/control/rcpthosts!"
    echo "Refusing to start SMTP listener because it'll create an open relay"
    exit 1
fi

exec /usr/local/bin/softlimit -m 2000000 \
    /usr/local/bin/tcpserver -v -R -l "$LOCAL" -x /etc/tcp.smtp.cdb -c "$MAXSMTPD" \
        -u "$QMAILDUID" -g "$NOFILESGID" 0 smtp /var/qmail/bin/qmail-smtpd 2>&1

Note: concurrencyincoming isn't a standard qmail control file. It's a feature of the above script. Also, that's -1 (dash one) on the LOCAL line and -l (dash ell) on the tcpserver line.

Note: The memory limit specified in the softlimit command may need to be raised depending upon your operating system and hardware platform. If attempts to connect to port 25 fail or remote systems are unable to send you mail, or you see a message like:

/usr/local/bin/tcpserver: error while loading shared libraries:
libc.so.6: failed to map segment from shared object: Cannot allocate memory try raising it to 3000000 or 4000000.

# echo 20 > /var/qmail/control/concurrencyincoming
# chmod 644 /var/qmail/control/concurrencyincoming
# vim /var/qmail/supervise/qmail-smtpd/log/run --> Add the following

#!/bin/sh
# supervise log script for qmail-smtpd: run multilog as the qmaill user,
# timestamping each line (t) and writing the log to /var/log/qmail/smtpd.
exec /usr/local/bin/setuidgid qmaill /usr/local/bin/multilog t /var/log/qmail/smtpd

# chmod 755 /var/qmail/supervise/qmail-send/run
# chmod 755 /var/qmail/supervise/qmail-send/log/run
# chmod 755 /var/qmail/supervise/qmail-smtpd/run
# chmod 755 /var/qmail/supervise/qmail-smtpd/log/run

# mkdir -p /var/log/qmail/smtpd
# chown qmaill /var/log/qmail /var/log/qmail/smtpd
# rm -rf /usr/sbin/sendmail   
# ln -s /var/qmail/bin/sendmail /usr/sbin
# rm -rf /usr/lib/sendmail
# ln -s /var/qmail/bin/sendmail /usr/lib
# echo  root > /var/qmail/alias/.qmail-root
# echo  root > /var/qmail/alias/.qmail-postmaster
# ln -s /var/qmail/alias/.qmail-postmaster /var/qmail/alias/.qmail-mailer-daemon
# chmod 644 /var/qmail/alias/.qmail-root /var/qmail/alias/.qmail-postmaster
# echo '127.:allow,RELAYCLIENT=""' >>/etc/tcp.smtp
# qmailctl cdb
# ln -s /var/qmail/supervise/qmail-send /var/qmail/supervise/qmail-smtpd /service

Monday 9 January 2012

Snort NIDS Mode

Note:- Make sure that you applied the tutorial in the post correctly

1. Install MySQL DB Environment

# /etc/init.d/mysqld start
# mysql -u root -p(password) --> lowercase -p prompts for the password; uppercase -P specifies the port
mysql> create database snort;
mysql> grant select,insert on root.* to snort@localhost;
mysql> set password for snort@localhost=password('123');
mysql> grant create,insert,select,delete,update on snort.* to snort@localhost;
mysql> grant create,insert,select,delete,update on snort.* to snort;

2. Import MySQL DB schema

# mysql -u root -p < /root/snort-install/snort-2.9.1/schemas/create_mysql  snort

Note:- create_mysql will be found under snort source code directory in schema sub-directory

3. Setup Snort NIDS /etc/snort environment

# mkdir /etc/snort && cp -a /root/snort-install/snort-2.9.1/etc/* /etc/snort/

4. Download the latest Snort rules and add it in /etc/snort/rules

# cd /etc/snort/ && tar -xzvf snortrules*

5. Configure /etc/snort/snort.conf to use MySQL and rules

# vim /etc/snort/snort.conf

ipvar HOME_NET 192.168.1.0/24
var RULE_PATH /etc/snort/rules                   
output database: log, mysql , user=<root> password=<123> dbname=<snort> host=<localhost>

6. Start Snort as NIDS mode

# snort -i eth0 -c /etc/snort/snort.conf

ERROR: parser.c(5260) Could not stat dynamic module path "/usr/local/lib/snort_dynamicrules": No such file or directory.
Fatal Error, Quitting..

# mkdir /usr/local/lib/snort_dynamicrules

ERROR: log_tcpdump: Failed to open log file "/var/log/snort/snort.log.1323643665": No such file or directory
Fatal Error, Quitting..

# mkdir /var/log/snort/

7. Setup BASE web analysis application

download adodb*.tgz and extract it in /var/www/html --> It provides connectivity between BASE and MySQL
# mv adodb5 adodb
download BASE from http://base.secureideas.net and extract it in /var/www/html/
# mv /var/www/html/base* /var/www/html/base && cd /var/www/html/base
# mv base_conf.php.dist base_conf.php
# vim base_conf.php
$BASE_urlpath = '/base';
$DBlib_path = '/var/www/html/adodb';
$DBtype = 'mysql';
$alert_dbname   = 'snort';
$alert_host     = 'localhost';
$alert_port     = '';
$alert_user     = 'snort';
$alert_password = '123';
# chown -R apache /var/www/html/base

OR
# mv /var/www/html/base/base_conf.php /var/www/html/base/base_conf.php.bak
Open your favorite web browser and go to: http://www.example.com/base/setup and follow the instructions


Note:- in /etc/php.ini modify the following variable
error_reporting = E_ALL & ~E_NOTICE

This is a list of php packages I installed rpm -qa | grep php

php53-common-5.3.3-1.el5_7.3
php53-5.3.3-1.el5_7.3
php53-mysql-5.3.3-1.el5_7.3
php53-gd-5.3.3-1.el5_7.3
php53-cli-5.3.3-1.el5_7.3
php53-pdo-5.3.3-1.el5_7.3
php53-devel-5.3.3-1.el5_7.3

To make the Graph's from BASE work you will also need to install Image_Color, Image_Canvas and Image_Graph.
To do this do:


Note:- To install pear command for php53
# wget http://pear.php.net/go-pear.phar 
# php go-pear.phar and press enter      

pear install Image_Color
pear install Image_Canvas-alpha
pear install Image_Graph-alpha


To start SNORT and make BASE show you the Snort's logged info, you will need to run:

/usr/local/bin/snort -c /etc/snort/snort.conf -i eth0 -g root -D

Note:- Make sure that php and php-mysql are installed otherwise BASE will not be able to connect to MySQL

Sunday 8 January 2012

Snort Installation on Centos 5.5

The pre-requisites are:

    libtool(.x86_64)
    mysql-server
    gcc
    gcc-c++
    mysql-devel

The requisites are:

    libpcap >= 1.0
    daq-0.5
    libdnet-1.12
    pcre-8.12

I recommend using the DAG repository:
# rpm -Uhv http://apt.sw.be/redhat/el5/en/x86_64/rpmforge/RPMS//rpmforge-release-0.3.6-1.el5.rf.x86_64.rpm --> For 64bits
# rpm -Uvh http://apt.sw.be/redhat/el5/en/i386/rpmforge/RPMS/rpmforge-release-0.5.2-2.el5.rf.i386.rpm --> For 32bits

Then append the following in /etc/yum.repos.d/CentOS-Base.repo

[dag]
name=CentOS-$releasever - Contrib
mirrorlist=http://apt.sw.be/redhat/el4/en/$ARCH/dag
#baseurl=http://mirror.centos.org/centos/$releasever/contrib/$basearch/
gpgcheck=0
enabled=0

Then update:
# yum update

First off, take notice of the libpcap version Snort requires (>= 1.0):
the one in the CentOS repositories is outdated — it only goes up to version 0.9.4.

Remove the current libpcap:
# yum remove libpcap libpcap-devel

Install some basic requisites:
# yum install gcc mysql-devel mysql-server libtool(.x86_64)
# yum -y install gcc-c++ make flex bison
# yum -y install libdnet(.x86_64) libdnet-devel(.x86_64)

Create a directory and put all needed packages here:
# mkdir snort-install && cd snort-install

Download the files:
snort-2.9.1.tar.gz
# wget http://www.snort.org/downloads/1107
daq-0.5.tar.gz
# wget http://www.snort.org/downloads/860
libpcap-1.1.1
# wget http://www.tcpdump.org/release/libpcap-1.1.1.tar.gz
pcre-8.12
# wget ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.12.tar.gz

LIBPCAP:

# tar -zxvf libpcap-1.1.1.tar.gz
# cd libpcap-1.1.1
# ./configure --prefix=/usr
# make all &&  make install
# ldconfig
# ldconfig -p | grep libpcap

DAQ:

# tar -zxvf daq-0.5.tar.gz
# cd daq-0.5
# ./configure --with-libpcap-libraries=/usr/lib/
# make && make install

PCRE:

# tar -zxvf pcre-8.12.tar.gz
# cd pcre-8.12
# ./configure --enable-utf8
# make all && make install

SNORT:

# tar -zxvf snort-2.9.1.tar.gz
# cd snort-2.9.1
# ./configure --with-mysql --enable-dynamicplugin --with-libpcap-libraries=/usr/lib --with-daq-libraries=/usr/local/lib/daq --enable-zlib --enable-gre --enable-mpls --enable-targetbased --enable-decoder-preprocessor-rules --enable-ppm --enable-perfprofiling
#  make &&  make install
# ln -s /usr/lib/libdnet.so.1.0.1 /usr/lib/libdnet.1

Consider Other Snort build options:
OPTIONS : --enable-ipv6 --enable-gre --enable-mpls --enable-targetbased --enable-decoder-preprocessor-rules --enable-ppm --enable-perfprofiling --enable-zlib

To make it work, you still need to download the rules package from the snort website, and copy to each correct folder
then setup the snort.conf file.


Problems You May Face


snort: error while loading shared libraries: libdnet.1: cannot open shared object file: No such file or directory
# ln -s /usr/lib/libdnet.so.1.0.1 /usr/lib/libdnet.1