Sometimes, in order to build something new, you first have to destroy, remove, and clean up the old. But when an Ambari server installation fails for whatever reason while you are configuring your Hadoop cluster, starting from zero is not that easy, because Ambari is not very good at tracking and erasing failed installations.
I am not sure why, but 100% of the times I have tried it, agent-cleanup.py does not work. I am not trying to reinvent the wheel, but the script simply does not do its job, so below is the manual procedure I use instead. First, run Ambari's host cleanup script on each node:
python /usr/lib/python2.6/site-packages/ambari_agent/HostCleanup.py
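The exact flags vary a little between Ambari versions (check --help on yours first), but HostCleanup.py supports a silent mode and a skip list; skipping the service users is handy if you plan to reinstall right away:

python /usr/lib/python2.6/site-packages/ambari_agent/HostCleanup.py --silent --skip=users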
ambari-server stop
ambari-agent stop
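Before removing any packages, it is worth making sure both daemons really went down; the status subcommands are enough for that:

ambari-server status   # should report that the server is not running
ambari-agent status    # should report that the agent is not running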
Then purge the packages. Note the command substitution: the inner pipeline builds the package list that the outer yum -y remove consumes:

yum -y remove $(yum list installed | grep -i hadoop | cut -d. -f1 | sed -e :a -e '$!N; s/\n/ /; ta')
yum -y remove ambari*
yum -y remove $(yum list installed | grep -w 'HDP' | cut -d. -f1 | grep -v "^[ ]" | sed -e :a -e '$!N; s/\n/ /; ta')
yum -y remove $(yum list installed | egrep -w 'hcatalog|hive|hbase|zookeeper|oozie|pig|sqoop|snappy|hadoop-lzo|knox|hadoop|hue' | cut -d. -f1 | grep -v "^[ ]" | sed -e :a -e '$!N; s/\n/ /; ta')
yum -y remove $(yum list installed | egrep -w 'mysql|postgresql' | cut -d. -f1 | grep -v "^[ ]" | sed -e :a -e '$!N; s/\n/ /; ta')
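For what it is worth, the cryptic sed at the end of every pipeline just collapses the multi-line output into one space-separated line, so the removal runs as a single command. You can see the effect with any multi-line input:

printf 'pkg-a\npkg-b\npkg-c\n' | sed -e :a -e '$!N; s/\n/ /; ta'
# prints: pkg-a pkg-b pkg-c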
Finally, wipe the leftover configuration, logs, and data directories:

rm -rf $(find /etc -maxdepth 1 | egrep -wi 'mysql|hcatalog|hive|hbase|zookeeper|oozie|pig|sqoop|snappy|hadoop|knox|hue|ambari|tez|flume|storm|accumulo|spark|kafka|falcon|slider|ganglia|nagios|phoenix' | sed -e :a -e '$!N; s/\n/ /; ta')
rm -rf $(find /var/log -maxdepth 1 | egrep -wi 'mysql|hcatalog|hive|hbase|zookeeper|oozie|pig|sqoop|snappy|hadoop|knox|hue|ambari|tez|flume|storm|accumulo|spark|kafka|falcon|slider|ganglia|nagios|phoenix' | sed -e :a -e '$!N; s/\n/ /; ta')
rm -rf $(find /tmp -maxdepth 1 | egrep -wi 'hadoop' | sed -e :a -e '$!N; s/\n/ /; ta')
rm -rf $(find /var/lib -maxdepth 1 | egrep -wi 'pgsql|mysql|hcatalog|hive|hbase|zookeeper|oozie|pig|sqoop|snappy|hadoop|knox|hue|ambari|tez|flume|storm|accumulo|spark|kafka|falcon|slider|ganglia|nagios|phoenix' | sed -e :a -e '$!N; s/\n/ /; ta')
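Those rm -rf lines are unforgiving, so consider previewing what each find pipeline matches before deleting, and double-check the host once you are done. A minimal sanity pass, using /var/lib as the example:

# preview what would be deleted: run the pipeline without the rm -rf
find /var/lib -maxdepth 1 | egrep -wi 'pgsql|mysql|hive|hbase|zookeeper|hadoop|ambari'

# afterwards, confirm no Hadoop-related packages survived
yum list installed | egrep -i 'ambari|hdp|hadoop'   # should print nothing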