Using Selenium WebDriver with JavaScript

Introduction to Selenium-WebDriver

  • The WebDriverJS library uses a promise manager to ease the pain of working with a purely asynchronous API
  • Rather than making you write a long chain of promises, the promise manager lets you write synchronous-looking code against a blocking API (see the note below)
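
The promise manager can also be switched off so that only plain promises (or async/await) are used. A minimal sketch, assuming a selenium-webdriver 3.x release that still ships the promise manager; verify the exact flag against the CHANGES notes of your installed version:

// Assumption: selenium-webdriver 3.x exposes promise.USE_PROMISE_MANAGER
// (alternatively, the SELENIUM_PROMISE_MANAGER=0 environment variable can be set before starting node)
const {promise} = require('selenium-webdriver');
promise.USE_PROMISE_MANAGER = false;   // must run before any driver call is scheduled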

Sample using a chain of promises [asynchronous Mode]

const {Builder, By, until} = require('selenium-webdriver');
new Builder()
    .forBrowser('firefox')
    .build()
    .then(driver => {
      return driver.get('http://www.google.com/ncr')
        .then(_ => driver.findElement(By.name('q')).sendKeys('webdriver'))
        .then(_ => driver.findElement(By.name('btnK')).click())
        .then(_ => driver.wait(until.titleIs('webdriver - Google Search'), 1000))
        .then(_ => driver.quit());
    });

Sample using the promise manager [synchronous Mode]

const {Builder, By, until} = require('selenium-webdriver');

let driver = new Builder()
    .forBrowser('firefox')
    .build();

driver.get('http://www.google.com/ncr');
driver.findElement(By.name('q')).sendKeys('webdriver');
driver.findElement(By.name('btnK')).click();
driver.wait(until.titleIs('webdriver - Google Search'), 1000);
driver.quit();
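
For completeness, the same flow can also be written with async/await instead of relying on the promise manager. A minimal sketch, assuming Node.js 8+ and a geckodriver binary on the PATH:

const {Builder, By, until} = require('selenium-webdriver');

(async function googleSearch() {
  // build() returns a promise for the driver; every call below is awaited in order
  let driver = await new Builder().forBrowser('firefox').build();
  try {
    await driver.get('http://www.google.com/ncr');
    await driver.findElement(By.name('q')).sendKeys('webdriver');
    await driver.findElement(By.name('btnK')).click();
    await driver.wait(until.titleIs('webdriver - Google Search'), 1000);
  } finally {
    await driver.quit();               // always close the browser, even if a step fails
  }
})();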

Understanding the Promises Chain

Selenium Code:
driver.sleep(4000);                       // Keep the chromedriver browser open for 4 seconds to review the results
driver.quit();

Logger Output:
[2018-01-17T09:37:13Z] [WARNING] [promise.ControlFlow] Detected scheduling of an unchained task.
    When the promise manager is disabled, unchained tasks will not wait for
    previously scheduled tasks to finish before starting to execute.
    New task: Task: WebDriver.quit()
        at thenableWebDriverProxy.schedule (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\webdriver.js:807:17)
        at thenableWebDriverProxy.quit (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\webdriver.js:840:23)
        at Object. (D:\xampp\htdocs\pvdata\testSelenium\test\testSelenium.js:31:8)
        at Module._compile (module.js:635:30)
        at Object.Module._extensions..js (module.js:646:10)
        at Module.load (module.js:554:32)
        at tryModuleLoad (module.js:497:12)
        at Function.Module._load (module.js:489:3)
        at Function.Module.runMain (module.js:676:10)
        at startup (bootstrap_node.js:187:16)
    Previous task: Task: WebDriver.sleep(4000)
        at ControlFlow.timeout (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\promise.js:2510:17)
        at thenableWebDriverProxy.sleep (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\webdriver.js:956:23)
        at Object. (D:\xampp\htdocs\pvdata\testSelenium\test\testSelenium.js:30:8)
        at Module._compile (module.js:635:30)
        at Object.Module._extensions..js (module.js:646:10)
        at Module.load (module.js:554:32)
        at tryModuleLoad (module.js:497:12)
        at Function.Module._load (module.js:489:3)
        at Function.Module.runMain (module.js:676:10)
        at startup (bootstrap_node.js:187:16)                                                                                                                                                              
  • The task WebDriver.quit() is scheduled to run after WebDriver.sleep(4000) has finished
  • This ordering of scheduled tasks is what is called the Promises Chain (an explicit version is sketched below)
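
When the promise manager is disabled, this ordering has to be expressed explicitly by chaining the second task onto the first one. A minimal sketch, reusing the driver object from above:

// Explicit promise chain: quit() is only scheduled after sleep(4000) has resolved
driver.sleep(4000)
    .then(function () {
        return driver.quit();
    });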

Logging with Selenium-WebDriver

Selenium Code: 
var {Builder, By, until, logging } = require('selenium-webdriver');
//
// Enable Logging 
logging.installConsoleHandler();
logging.getLogger('promise.ControlFlow').setLevel(logging.Level.ALL);

Logger Output:
[2018-01-17T09:37:13Z] [WARNING] [promise.ControlFlow] Detected scheduling of an unchained task.
    When the promise manager is disabled, unchained tasks will not wait for
    previously scheduled tasks to finish before starting to execute.
    New task: Task: WebDriver.getTitle()
        at thenableWebDriverProxy.schedule (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\webdriver.js:807:17)
        at thenableWebDriverProxy.getTitle (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\webdriver.js:1000:17)
        at Object. (D:\xampp\htdocs\pvdata\testSelenium\test\testSelenium.js:12:8)
        at Module._compile (module.js:635:30)
        at Object.Module._extensions..js (module.js:646:10)
        at Module.load (module.js:554:32)
        at tryModuleLoad (module.js:497:12)
        at Function.Module._load (module.js:489:3)
        at Function.Module.runMain (module.js:676:10)
        at startup (bootstrap_node.js:187:16)
    Previous task: Task: WebDriver.navigate().to(http://www.google.com/ncr)
        at thenableWebDriverProxy.schedule (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\webdriver.js:807:17)
        at Navigation.to (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\webdriver.js:1133:25)
        at thenableWebDriverProxy.get (D:\xampp\htdocs\pvdata\testSelenium\test\node_modules\selenium-webdriver\lib\webdriver.js:988:28)
        at Object. (D:\xampp\htdocs\pvdata\testSelenium\test\testSelenium.js:11:8)
        at Module._compile (module.js:635:30)
        at Object.Module._extensions..js (module.js:646:10)
        at Module.load (module.js:554:32)
        at tryModuleLoad (module.js:497:12)
        at Function.Module._load (module.js:489:3)
        at Function.Module.runMain (module.js:676:10)

Install Selenium-WebDriver / ChromeDriver locally

  • npm install selenium-webdriver@3.6.0
  • npm install chromedriver@2.34.1

Verify Installation

D:\xampp\htdocs\pvdata\testSelenium\test>npm list --depth=0
D:\xampp\htdocs\pvdata\testSelenium\test
+-- chromedriver@2.34.1
`-- selenium-webdriver@3.6.0
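
Before writing the real test it can help to check that both modules load from Node.js. A tiny throw-away script; the file name quickCheck.js is only an illustrative assumption:

// quickCheck.js - load both packages and print what we got
require('chromedriver');                              // registers the chromedriver binary
const webdriver = require('selenium-webdriver');
console.log('Builder available:', typeof webdriver.Builder === 'function');
console.log('until available  :', typeof webdriver.until === 'object');

Run it with: node quickCheck.js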

Testing a Google Search with Selenium Webdriver

Create a JavaScript file testSelenium.js

require('chromedriver');
var {Builder, By, until, logging } = require('selenium-webdriver');
//
// Enable Logging 
// logging.installConsoleHandler();
//logging.getLogger('promise.ControlFlow').setLevel(logging.Level.ALL);

var  driver = new Builder()
    .forBrowser('chrome')
    .build();
dumpMessage( "--- Script Start ---");

driver.get('http://www.google.com/ncr');
driver.getTitle().then(function (titleName ) {	dumpMessage("Initial Google Page load - Page Title: ---" + titleName + "---" ); });

driver.findElement(By.name('q')).sendKeys('webdriver');            // Fill the search string into the search box
driver.findElement(By.name('btnK')).click();                        // Start the Google search

var searchTitle = 'webdriver - Google Search';                      // The new page title
driver.wait(until.titleIs(searchTitle), 10000)                      // Wait until the Google search list becomes available
	.then(function (titleName) { console.log(getTime() + "Title Element now Visible in DOM ? : " + titleName); });

var xPathElem =  '//*[@id="rso"]/div/div/div[1]/div/div/h3/a';      // XPath to the first item of the Google search results
driver.wait(until.elementLocated( { xpath: xPathElem }), 20000)     // Wait until the Google search results become available
	.then(function () { dumpMessage("XPath wait finished : Element  Found in DOM - xPath :  " + xPathElem);
		});

driver.findElement( { xpath: xPathElem }).click()                   // Click on the first search result
	.then(function () { dumpMessage("xPath  Element  Found in DOM: Triggering a Click Event on first Element of Google Search List !");
		});

driver.sleep(4000);                                                 // Keep the chromedriver browser open for 4 seconds to review the results
driver.quit();                                                      // Finally quit the driver

dumpMessage("--- Script End ---");                                  // Note: as all driver calls are asynchronous, this line runs immediately

// Helper functions
function dumpMessage(mesg) {                                        // A bare console.log at the top level would print an "undefined" message,
	console.log(getTime() + mesg);                                  // so provide a wrapper function which returns some data
	return "---------------------------------------------------";
}

function getTime() {
	var ts = new Date();
    return ts.toLocaleTimeString()+ ":" + ts.getMilliseconds() + "  ";
}

Run the first Selenium-WebDriver script: testSelenium.js

D:\xampp\htdocs\pvdata\testSelenium\test> node testSelenium.js

Script Output

D:\xampp\htdocs\pvdata\testSelenium\test> node testSelenium.js
10:40:29:745  --- Script Start ---
10:40:29:745  --- Script End ---

DevTools listening on ws://127.0.0.1:12790/devtools/browser/aa1d4440-591b-45dd-8332-f590848ec58c
10:40:34:359  Initial Google Page load - Page Title: ---Google---
10:40:35:173  Title Element now Visible in DOM ? : true
10:40:35:187  XPath wait finished : Element  Found in DOM - xPath :  //*[@id="rso"]/div/div/div[1]/div/div/h3/a
10:40:36:320  xPath  Element  Found in DOM: Triggering a Click Event on first Element of Google Search List !

Understanding the Script Output

  • The script end is reached in less than 1 ms [due to the asynchronous nature of Node.js]
  • All driver calls are scheduled via chains of promises for later execution
  • After about 5 seconds ChromeDriver is started and the initial Google search page gets displayed [Page Title: Google]
  • About 1 second later the Google search result list gets displayed [Page Title: webdriver - Google Search]
  • Another second later the first entry of our Google search result list gets opened (see the sketch below for logging the script end at the actual end)
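
If the final log message should really appear at the end of the run, it has to be chained onto the last scheduled task instead of being called at the top level. A minimal sketch, reusing the dumpMessage() helper from the script above:

// Chain the final message onto quit(), so it is printed only after the browser has been closed
driver.quit()
    .then(function () {
        dumpMessage("--- Script End ---");
    });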

Reference

Unit Testing with Netbeans [Mocha]


Versions used

  • Netbeans IDE 8.2
  • Mocha 4.0.1 – Our Testing Tool
  • Chai 4.1.2 – An Assert Library

Create an HTML5 Project with Javascript Support inside Netbeans

Create an HTML5/Javascript Project and add the Javascript file main2.js to the project
Image mocha_img0.jpg.jpg NOT Found
  • Note: exports is a Node.js concept that declares the functions that your module makes available to code outside itself (see the sketch below)
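
A minimal sketch of what main2.js could look like; the function names add() and mul() are illustrative assumptions, not taken from the original project:

// main2.js - functions under test, made available to the Mocha test script via module.exports
function add(a, b) {
    return a + b;
}

function mul(a, b) {
    return a * b;
}

module.exports = { add: add, mul: mul };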

Setup Mocha as Testing Provider

Change Project Properties for Javascript Testing
Image mocha_img1.jpg.jpg NOT Found

Setup Mocha/Chai [ Open a Netbeans Terminal Window]

Initialize new Node.js Project


$ cd D:\xampp\htdocs\pv\mochaTest2
$ npm init                                                                                                                                                                                  
This utility will walk you through creating a package.json file.
It only covers the most common items, and tries to guess sensible defaults.

See `npm help json` for definitive documentation on these fields
and exactly what they do.

Use `npm install ` afterwards to install a package and
save it as a dependency in the package.json file.

Press ^C at any time to quit.
package name: (mochatest2) 
version: (1.0.0) 
description: Mocha/Chai Testing
entry point: (index.js) main2.js
test command: mocha
git repository: 
keywords: 
author: Helmut
license: (ISC) MIT
About to write to D:\xampp\htdocs\pv\mochaTest2\package.json:

{
  "name": "mochatest2",
  "version": "1.0.0",
  "description": "Mocha/Chai Testing",
  "main": "main2.js",
  "directories": {
    "test": "test"
  },
  "scripts": {
    "test": "mocha"
  },
  "author": "Helmut",
  "license": "MIT"
}


Is this ok? (yes) yes

helmut@myPC /cygdrive/d/xampp/htdocs/pv/mochaTest2

Setup Mocha and Chai in our Netbeans Project Base Directory

helmut@myPC /cygdrive/d/xampp/htdocs/pv/mochaTest2
$  npm install mocha                                                                                                                                                                       
npm WARN mochatest2@1.0.0 No repository field.
+ mocha@4.0.1
added 24 packages in 2.189s

helmut@myPC /cygdrive/d/xampp/htdocs/pv/mochaTest2
$  npm install chai                                                                                                                                                                          
npm WARN mochatest2@1.0.0 No repository field.

+ chai@4.1.2
added 7 packages in 1.304s

Verify package.json – created by our previous setup steps

helmut@myPC /cygdrive/d/xampp/htdocs/pv/mochaTest2
$ cat   package.json 
{
  "name": "mochatest2",
  "version": "1.0.0",
  "description": "Mocha/Chai Testing",
  "main": "main2.js",
  "directories": {
    "test": "test"
  },
  "scripts": {
    "test": "mocha"
  },
  "author": "Helmut",
  "license": "MIT",
  "dependencies": {
    "chai": "^4.1.2",
    "mocha": "^4.0.1"
  }
}

Create and run the Mocha test script manually under the unit test directory

Manually run Mocha Tests via Netbeans Terminal Window
Image mocha_img2.jpg.jpg NOT Found
  • Our test script mochaTest2.js imports the functions from Chai and main2.js via require()
  • describe() is merely used for grouping tests and can be nested if needed
  • it() is a single test case (see the sketch below)
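
A minimal sketch of such a test file, assuming the illustrative add()/mul() exports from the main2.js sketch above and the test directory created by npm init:

// test/mochaTest2.js - describe() groups the test cases, it() defines a single test case
var expect = require('chai').expect;
var main2  = require('../main2');

describe('main2.js basic math', function () {
    it('adds two numbers', function () {
        expect(main2.add(2, 3)).to.equal(5);
    });

    it('multiplies two numbers', function () {
        expect(main2.mul(2, 3)).to.equal(6);
    });
});

Running npm test (which maps to mocha in package.json) should report both test cases as passing.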

Finally run Netbeans Unit Tests [Alt+F6]

Open Netbeans Test Result Window to review Test Results
Image mocha_img3.jpg.jpg NOT Found
  • Note: If Netbeans IDE hangs during Testing you may need to restart Netbeans to fix this !

Reference

How To Unit Testing JavaScript with Netbeans
Mocha Homepage
Working with package.json
An Absolute Beginner’s Guide to Using npm
Understanding module.exports and exports in Node.js

Decrypt HTTPS traffic with Wireshark and Fiddler


Using Wireshark on Windows 7 – Key Facts and Overview

  • Wireshark is a traffic analyzer that helps you learn how networking works and how to diagnose network problems
  • To track SSL traffic on Windows we use Wireshark with session key logging
  • If the browser uses a Diffie-Hellman cipher we need to disable it

Understanding the SSL Handshake Protocol

TLS/SSL Handshake Diagram
Image https_img1.jpg NOT Found
  • The SSL or TLS client sends a “client hello” message that lists cryptographic information such as the SSL or TLS version and, in the client’s order of preference, the CipherSuites supported by the client
  • The SSL or TLS server responds with a “server hello” message that contains the CipherSuite chosen by the server from the list provided by the client
  • Reference: An overview of the SSL or TLS handshake
  • Note: The “server hello” is very important because it shows the chosen cipher suite; traffic that uses a Diffie-Hellman cipher can’t be traced!

Disable the Diffie-Hellman Cipher for Browsers

Disable the Diffie-Hellman Cipher for Firefox

  • Launch Firefox, navigate to “about:config” from address bar
  • Double click all the Diffie-Hellman ciphers starting with dhe or ecdhe to change the Value to false
  • Restart Firefox to take effect
Disable the Diffie-Hellman Cipher for Firefox
Image ssl_dh_img2.jpg NOT Found

Disable the Diffie-Hellman Cipher for Chrome

  • View the default Cipher Suites list from the Client Hello message of Chrome
  • Append all the ciphers that contain “ECDHE” or “DHE” in hex to the command line parameter --cipher-suite-blacklist and launch Chrome
  • Sample: chrome.exe --cipher-suite-blacklist=0xc02b,0xc02f,0x009e,0xcc14,0xcc13,0xc00a,0xc014,0x0039,0xc009,0xc013,0x0033
Cipher Suites (15 suites)
     Cipher Suite: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 (0xc02b)
     Cipher Suite: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 (0xc02f)
     Cipher Suite: TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 (0x009e)
     Cipher Suite: TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 (0xcc14)
     Cipher Suite: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 (0xcc13)
     Cipher Suite: TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (0xc00a)
     Cipher Suite: TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA (0xc014)
     Cipher Suite: TLS_DHE_RSA_WITH_AES_256_CBC_SHA (0x0039)
     Cipher Suite: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA (0xc009)
     Cipher Suite: TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA (0xc013)
     Cipher Suite: TLS_DHE_RSA_WITH_AES_128_CBC_SHA (0x0033)
     Cipher Suite: TLS_RSA_WITH_AES_128_GCM_SHA256 (0x009c)
     Cipher Suite: TLS_RSA_WITH_AES_256_CBC_SHA (0x0035)
     Cipher Suite: TLS_RSA_WITH_AES_128_CBC_SHA (0x002f)
     Cipher Suite: TLS_RSA_WITH_3DES_EDE_CBC_SHA (0x000a)

Testing SSL Decryption with Wireshark

Setup Wireshark

Activate Session Key Logging on Windows
Image https_img1.jpg NOT Found
  • Create the SSLKEYLOGFILE environment variable
Image https_img2.jpg NOT Found
  • Enable the SSLKEYLOGFILE for the SSL protocol
Image https_img21.jpg NOT Found
  • If needed, add additional HTTPS ports like: Port 8443
  • Edit -> Preferences -> Protocols -> HTTP

Run Wireshark and decrypt a TLS/SSL packet

Run first https test against Google Website
Image https_img3.jpg NOT Found
  • Testing against https://google.de
  • The “Decrypted SSL” tab indicates that this packet was decrypted successfully!

Advantages / Disadvantages using Wireshark

Advantages:
  • Very detailed info for the complete network stack
Disadvantages:
  • A little bit complicated
  • You need to know a lot of networking details
  • The client must support the SSLKEYLOGFILE feature
  • Supports only certain browsers [Firefox, Chrome] and email clients [Thunderbird]
  • No support for cURL and WordPress functions like wp_remote_get() and …

Reference

Using Fiddler

Fiddler – How it Works

Overview
Image fiddler_overview.jpg NOT Found
  • To read HTTPS data, Fiddler acts as a man-in-the-middle
  • Fiddler listens on the following network proxy address: localhost:8888
  • On startup Fiddler registers itself as a WinINET proxy service
  • All browsers [like Chrome, Firefox] that honor this proxy service send their data to the Fiddler proxy first
  • Finally, Fiddler forwards the data to the target host

Setup – Enable Decryption of SSL/HTTPS traffic

Setup
Image fiddler_setup.jpg NOT Found
  • Menu: Tools -> Options -> HTTPS
  • Check: [x] Decrypt HTTPS Traffic

Decrypt cURL HTTPS traffic with Fiddler

Using cURL: add --proxy 127.0.0.1:8888 to the cURL command
Image fiddler_curl1.jpg NOT Found
  • Start Fiddler
    [Note: Fiddler uses port number 8888 by default]
  • Add --proxy 127.0.0.1:8888 to your curl command
Image fiddler_curl2.jpg NOT Found
  • Use the Inspectors menu item
  • This gives us a decrypted view of the HTTPS POST request
  • Note: passwords and usernames can now easily be extracted
  • Use the Raw menu item to get details about the HTTPS response, like: HTTP response code, HTTPS headers and HTTPS response body

Decrypt WordPress HTTPS traffic for wp_remote_get()/wp_remote_post() with Fiddler

Application Details

Display Timelines with Fiddler

Timeline
Image fiddler_timeline.jpg NOT Found
  • Page load [ /pv/wr] is decoupled from the PHP processing
  • PHP processing [initWRApp.php] is triggered via Ajax Request
  • All remote PHP processing steps [login.action, tologin.action and summaryInfo.action] run sequentially

Display HTTP Packet Details with Fiddler

Step 1: Initial Page Load – Web Browser -> Local Webserver [192.168.1.50]
Image fiddler_data_img1.jpg NOT Found
  • Protocol: HTTP
  • HTTP GET request to load the initial page
Step 2: Starting PHP Remote Processing – Web Browser -> Local Webserver [192.168.1.50]
Image fiddler_data_img2.jpg NOT Found
  • Protocol: HTTP
  • HTTP GET request to start PHP remote processing with wp_remote_get and wp_remote_post
Step 3: Authenticate with Username/Password against the Remote Webserver – Local Webserver [192.168.1.50] -> Remote Webserver [52.58.164.53:8443]
Image /fiddler_data_img3.jpg NOT Found
  • Protocol: HTTPS POST
  • URL: https://52.58.164.53:8443/security!login.action
  • WordPress function used: wp_remote_post()
  • Remote Webserver returns “op.successfully” if the login is OK!
  • Remote Webserver returns a new JSESSIONID cookie if the login is OK!
Step 4: Capture the initial page after login into the Remote Webserver – Local Webserver [192.168.1.50] -> Remote Webserver [52.58.164.53:8443]
Image fiddler_data_img4.jpg NOT Found
  • Protocol: HTTPS GET
  • URL: https://52.58.164.53:8443/security!tologin.action
  • WordPress function used: wp_remote_get()
  • Remote Webserver returns the initial webpage after a successful login!
  • Remote Webserver returns an X-CSRF-Token which should be used for subsequent requests
Step 5: Finally capture data from the Remote Webserver – Local Webserver [192.168.1.50] -> Remote Webserver [52.58.164.53:8443]
Image fiddler_data_img5.jpg NOT Found
  • Protocol: HTTPS POST
  • URL: https://52.58.164.53:8443/summaryAction!querySummaryInfo.action?nodeSN=0
  • WordPress function used: wp_remote_post()
  • Remote Webserver returns data in JSON format
  • For this request we use the JSESSIONID cookie returned from Step 3
  • For this request we use the X-CSRF-TOKEN returned from Step 4

Advantages/Disadvantages using Fiddler

Advantages:
  • Easy setup
  • Supports all sorts of clients like cURL, PHP, browsers, email clients, …
Disadvantages:
  • Only HTTP/HTTPS traffic can be monitored

Automate Login to a CSRF protected WebSite using cURL


Tools Used for this Tutorial

Tool                Tool Version
Curl                7.56.1 (x86_64-pc-win32) libcurl/7.56.1 OpenSSL/1.1.0g (WinSSL)
Google Chrome       Version 61.0.3163.100 (Official Build)
Google Dev Tools    Version 61.0.3163.100 (Official Build)
bash shell          4.4.12(3)-release

Overview – Login to a WebSite in 3 Steps

Script Name     Action                                          Return
WRlogin.sh      Login to the website using username/password    JSESSIONID cookie
WRlogin2.sh     Redirect request after a successful login       X-CSRF-TOKEN via HTML meta tags
WRData.sh       Access the login-protected website              Inverter data like: current power, ..

Curl Usage with Google Dev Tools – First steps

To copy the cURL syntax for a bash shell, do the following

  • Load your initial website a first time
  • Press F12 to attach/open Google Dev Tools
  • Reload your Page
  • Navigate to Network Tab
  • Right click the desired API call
  • Select “Copy” -> “Copy as cURL (bash)”
Retrieve the cURL bash Command using Google Dev Tools
Image curl_img1.jpg NOT Found

Run our first cURL command

  • Paste the cURL command to a bash Shell
  • Add -v switch to get HTTP header dumped
  • redirect stderr by adding: 2>&1
$ curl 'https://52.58.164.53:8443/index.action'  -H 'Pragma: no-cache' -H 'Accept-Encoding: gzip, deflate, br' -H 'Accept-Language: en-US,en;q=0.8' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'Cache-Control: no-cache' -H 'Cookie: JSESSIONID=1jzw19tuvg42wl93fxp1exzkf'  -H 'Connection: keep-alive' --compressed --insecure -v 2>&1
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
...
{ [5 bytes data]
< HTTP/1.1 200 OK
< Expires: Thu, 01-Jan-1970 00:00:00 GMT
< Set-Cookie: JSESSIONID=ioyf0enlkduy1geo82r2tryk8;Path=/;Secure;HttpOnly
< Cache-Control: no-store,no-cache
< X-Frame-Options: SAMEORIGIN
< X-Download-Options: noopen
< X-XSS-Protection: 1;mode=block
< Strict-Transport-Security: max-age=31536000;includeSubdomains
< X-Content-Type-Options: nosniff
< Content-Language: de-DE
< Content-Type: text/html; charset=utf-8
< Content-Length: 8471
...

HTTP Status of our first cURL request

  • The website gets loaded successfully -> HTTP/1.1 200 OK
  • The Website Content-Length is: 8471
  • The Website uses cookies: JSESSIONID
  • The Website uses https protocol

Use the Google Dev Tools to understand the Login Logic

JavaScript and JavaScript Files loaded by the Initial Request
Image curl_img2.jpg NOT Found

index.action loads 2 javascript files

  • login.js
  • verifyCode.js

Processing and Validation of Login Form


	var verifyResult;
	$("#login").click(function() {
		if (!allowLogin) {
			jAlert(browserMessage, Message.alarm_info, '{"' + Message.sur + '":"OK"}');
			clearPassword();				
			return;
		}

		var userName = $.trim($("#userName").val());
		var password = $("#password").val();

		if (!loginValidate(userName, password)) {
			clearPassword();
			return;
		}
		var showVerifyCode = document.getElementById("verifyCode_tr").style.display;
		comitLogin(userName, password);
	});
  • For our cURL tutorials we can skip this Step as we already know username / password
  • Let’s review the JS function comitLogin()

Login Code implemented via a synchronous AJAX POST request

function comitLogin(userName, password) {

	if ($("#veryCode").is(":visible")) {
		if (!validateVCodeLength()) {
			clearPassword();
			return;
		}
	}

	var code = $("#veryCode").attr("value");

	$.ajax({
		url : basePath + "security!login.action",
		type : "POST",
		async : false,
		dataType : "json",
		data : {
			"userName" : userName,
			"password" : password,
			dateTime : new Date().getTime(),
			"veryCode" : code
		},
		cache : false,
		beforeSend:function (XMLHttpRequest)
		{
			//let go
		},
		success : function(res) {
			clearPassword();
			$("#pass1").val("");
			$("#pass2").val("");
			$("#loginErrorMsg").html("")
			$("#loginErrorMsg2").html("");
			if (res.retMsg == "op.successfully") {
				window.location.href = newPath + "securitys!tologin.action";
				return;
			} else if (res.retMsg == "op.update") {
				  $("#loginView").hide();
                       ....
  • For sending username, password, date and verification code a POST request is used
  • The remote function to be called is security!login.action
  • A successful login should return: “op.successfully”
  • After a successful login a page redirect is triggered to: securitys!tologin.action

Step 1: Implement the Website JavaScript Login with bash/cURL

Set a breakpoint in the AJAX POST success callback when returning from the AJAX POST request
Image curl_img3.jpg NOT Found
Log in to the page – the login process is paused at the breakpoint
Image curl_img4.jpg NOT Found
Verify Response and Request Headers
Image curl_img5.jpg NOT Found
  • For further processing we need to use the JSESSIONID returned by the Response header

Paste the above Output into a Bash Shell

$  curl 'https://52.58.164.53:8443/security!login.action' -H 'Pragma: no-cache' -H 'Origin: https://52.58.164.53:8443' -H 'Accept-Encoding: gzip, deflate, br' -H 'Accept-Language: en-US,en;q=0.8' -H 'User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36' -H 'Content-Type: application/x-www-form-urlencoded; charset=UTF-8' -H 'Accept: application/json, text/javascript, */*; q=0.01' -H 'Cache-Control: no-cache' -H 'X-Requested-With: XMLHttpRequest' -H 'Cookie: JSESSIONID=jhqdzlufdh95hevd878z5nig' -H 'Connection: keep-alive' -H 'Referer: https://52.58.164.53:8443/index.action' --data 'userName=pvlocal&password=PPPP&dateTime=1510764929706&veryCode=' --compressed
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0

curl: (60) SSL certificate problem: self signed certificate in certificate chain
More details here: https://curl.haxx.se/docs/sslcerts.html

curl failed to verify the legitimacy of the server and therefore could not
establish a secure connection to it. To learn more about this situation and
how to fix it, please visit the web page mentioned above.

 -> Disable SSL verification by using -k switch 

$ curl 'https://52.58.164.53:8443/security!login.action' -H 'Pragma: no-cache' -H 'Origin: https://52.58.164.53:8443' -H 'Accept-Encoding: gzip, deflate, br' -H 'Accept-Language: en-US,en;q=0.8' -H 'User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36' -H 'Content-Type: application/x-www-form-urlencoded; charset=UTF-8' -H 'Accept: application/json, text/javascript, */*; q=0.01' -H 'Cache-Control: no-cache' -H 'X-Requested-With: XMLHttpRequest' -H 'Cookie: JSESSIONID=jhqdzlufdh95hevd878z5nig' -H 'Connection: keep-alive' -H 'Referer: https://52.58.164.53:8443/index.action' --data 'userName=pvlocal&password=PPPP&dateTime=1510764929706&veryCode=' --compressed -k -i
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100    95  100    28  100    67     28     67  0:00:01 --:--:--  0:00:01   196HTTP/1.1 200 OK
Expires: Thu, 01-Jan-1970 00:00:00 GMT
Set-Cookie: JSESSIONID=1pt4jamjah1vbl44uep5ecvzm;Path=/;Secure;HttpOnly
Cache-Control: no-store,no-cache
X-Frame-Options: SAMEORIGIN
X-Download-Options: noopen
X-XSS-Protection: 1;mode=block
Strict-Transport-Security: max-age=31536000;includeSubdomains
X-Content-Type-Options: nosniff
Content-Language: de-DE
Set-Cookie: JSESSIONID=a5sjmaztpwa81acg06hymnxgb;Path=/;Secure;HttpOnly
Content-Type: application/json;charset=UTF-8
Content-Length: 28

{"retMsg":"op.successfully"}

 -> cURL Request works Now  

Testing an Invalid Login by changing password to xxxx

$ curl 'https://52.58.164.53:8443/security!login.action' -H 'Pragma: no-cache' -H 'Origin: https://52.58.164.53:8443' -H 'Accept-Encoding: gzip, deflate, br' -H 'Accept-Language: en-US,en;q=0.8' -H 'User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36' -H 'Content-Type: application/x-www-form-urlencoded; charset=UTF-8' -H 'Accept: application/json, text/javascript, */*; q=0.01' -H 'Cache-Control: no-cache' -H 'X-Requested-With: XMLHttpRequest' -H 'Cookie: JSESSIONID=jhqdzlufdh95hevd878z5nig' -H 'Connection: keep-alive' -H 'Referer: https://52.58.164.53:8443/index.action' --data 'userName=pvlocal&password=xxxx&dateTime=1510764929706&veryCode=' --compressed -k
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100   125  100    58  100    67     58     67  0:00:01 --:--:--  0:00:01   348 {"retMsg":"Falscher Benutzername oder falsches Passwort."}

Create a Bash Script for Automation – WRlogin.sh

#!/bin/bash
#
# Use the Google Dev Tools and select Copy as cURL (bash) to learn quickly about the cURL syntax for an HTTP request
#
# After a successful login the NEW JSESSIONID returned in the response cookie should be used for subsequent requests
#

loginCredentials="userName=pvlocal&password=Feli2010&dateTime=1510483708371&veryCode"
url="https://52.58.164.53:8443/security!login.action"

echo "--------------------------------------------------------------------------------------------------------"
echo "-> URL                      :" $url
echo "-> INPUT Cookie             :" $cookie
echo "-> INPUT Login Credentials  :" $loginCredentials
echo "-> OUTPUT                   :" NEW JSESSIONID cookie after successfull Login
echo "--------------------------------------------------------------------------------------------------------"
#
# DebugON set (possibly to the empty string) will print out additional trace info
# To enable Debugging run:  $ export DebugON
#
if [ $DebugON ]; then
    echo "DebugON is set, possibly to the empty string"
    set -x
fi
#

# Only use the needed HTTP header fields - most of the HTTP header fields copied from the Google Dev Tools cURL command are not needed
curl_header1='Accept-Language: en-US,en;q=0.8'
curl_header2='Accept-Encoding: gzip, deflate, br'
curl_header3='Accept: application/json, text/javascript, */*; q=0.01'
#
#  Arrays make this much easier. Note that the $cookie and $csrf_token values contain spaces !
#
args=("-k" "-v" "$url" -H "$cookie" "--data" "$loginCredentials"  -H "$csrf_token"  -H "$curl_header1" -H "${curl_header2}"  -H "${curl_header3}")
#
echo "---------------------------------- cURL command to be executed -----------------------------------------"
echo curl   "${args[@]}"
echo "--------------------------------------------------------------------------------------------------------"
output=$( curl "${args[@]}" 2>&1 )

#
# A successful login returns the following string
#    '"retMsg":"op.successfully"}
#

login_status=""
login_status=$(echo "$output" | grep  retMsg | awk -v FS="(retMsg\":\"|\"})"  '{print  $2 }')

#
# Note: login.sh returns two occurrences of the string:
#   Set-Cookie: JSESSIONID=h0eaof37hnta1wtjlk1j1nig2;Path=/;Secure;HttpOnly
# As we are only interested in the 2nd one, use: sed -n 2p
#

expcmd=""
expcmd=$(echo "$output" | grep Set-Cookie | sed -n 2p | awk -v FS="(Set-Cookie: |;)"  '{print "export cookie=\"Cookie: " $2 "\""}')

http_return=""
http_return=$(echo "$output" |  grep '< HTTP')


#echo "Login Status                 : " "$login_status"
# echo "New JSESSIONID EXPORT command: "  "$expcmd"

if [ "$login_status" == "op.successfully" ]; then
        echo "--------------------------------------------------------------------------------------------------------"
        echo "HTTP Return Code: "  "$http_return"
        echo "Login OK : Javascript Return Status:" "$login_status"
        echo "Run New JSESSIONID EXPORT command:  \$" "$expcmd"
        echo "--------------------------------------------------------------------------------------------------------"
elif [ "$login_status" == "op.verifyCode.fail" ]; then
        echo "--------------------------------------------------------------------------------------------------------"
        echo "HTTP Return Code: "  "$http_return"
        echo "Login Failded - Wrong verification Code  : Javascript Return Status:" "$login_status"
        echo "Login to WebSite and enter verification Code - after successfull login rerun this script !"
        echo "--------------------------------------------------------------------------------------------------------"
else
        echo "--------------------------------------------------------------------------------------------------------"
        echo "HTTP Return Code: "  "$http_return"
        echo "Login failed  check Login credentials - Javascript Return Status: " "$login_status"
        echo "--------------------------------------------------------------------------------------------------------"
fi

Testing the bash Script WRlogin.sh

$ ./WRlogin.sh
--------------------------------------------------------------------------------------------------------
-> URL                      : https://52.58.164.53:8443/security!login.action
-> INPUT Cookie             : Cookie: JSESSIONID=17grbvbtwu2ga9blndj7rfrsk
-> INPUT Login Credentials  : userName=pvlocal&password=Feli2010&dateTime=1510483708371&veryCode
-> OUTPUT                   : NEW JSESSIONID cookie after successfull Login
--------------------------------------------------------------------------------------------------------
---------------------------------- cURL command to be executed -----------------------------------------
curl -k -v https://52.58.164.53:8443/security!login.action -H Cookie: JSESSIONID=17grbvbtwu2ga9blndj7rfrsk --data userName=pvlocal&password=Feli2010&dateTime=1510483708371&veryCode -H X-CSRF-TOKEN: c3d70f5d-4388-4d85-a1db-9ac797b92418 -H Accept-Language: en-US,en;q=0.8 -H Accept-Encoding: gzip, deflate, br -H Accept: application/json, text/javascript, */*; q=0.01
--------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------
HTTP Return Code:  < HTTP/1.1 200 OK
Login OK : Javascript Return Status: op.successfully
Run New JSESSIONID EXPORT command:  $ export cookie="Cookie: JSESSIONID=rudqwfhaqx0m1mchkau9ycqzs"
--------------------------------------------------------------------------------------------------------

What we learned in Step 1

  • A successful login returns the message {"retMsg":"op.successfully"} and a new JSESSIONID
  • This JSESSIONID needs to be used for subsequent HTTP/AJAX requests
  • Note: At this stage we are able to attack this page using a brute-force method

Step 2: Implement the Login Redirect using bash/cURL

  • After a successful login the page is redirected to securitys!tologin.action
  • For the complete JS Code review Step 1
    if (res.retMsg == "op.successfully") {
	window.location.href = newPath + "securitys!tologin.action";
    return;

Use Google Dev tools to extract cURL bash Code

  • Login to the WebSite
  • Navigate to Network Tab
  • Locate the securitys!tologin.action Request
  • Select “Copy” -> “Copy as cURL (bash)”
Initial page
Image curl_img6.jpg NOT Found

Paste the Code to a bash Shell

$  curl 'https://52.58.164.53:8443/securitys!tologin.action' -H 'Pragma: no-cache' -H 'Accept-Encoding: gzip, deflate, br' -H 'Accept-Language: en-US,en;q=0.9' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'Referer: https://52.58.164.53:8443/index.action' -H 'Cookie: JSESSIONID=1b0miyons0sfl1uqxsbhdv0zud' -H 'Connection: keep-alive' -H 'Cache-Control: no-cache' --compressed --insecure
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0

<br /> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><br /> <html><br /> <head><br /> <base href="https://52.58.164.53:8443/"/><br /> <title>NetEco 1000S</title><br /> <meta http-equiv="pragma" content="no-cache"/><br /> <meta http-equiv="cache-control" content="no-cache"/><br /> <meta http-equiv="expires" content="0"/><br /> <meta http-equiv="keywords" content="keyword1,keyword2,keyword3"/><br /> <meta http-equiv="description" content="This is my page"/><br /> <meta http-equiv="X-UA-Compatible" content="IE=11,IE=8" /><br /> <meta name="_csrf" content="508a5c53-6d3d-4ed1-b56e-e292fcf0a2dc"/><br /> <meta name="_csrf_header" content="X-CSRF-TOKEN"/><br />

Pay special attention to the _csrf HTML meta tags returned from the above cURL request

  • Note: this CSRF token will be used to access the website !
 meta name="_csrf" content="508a5c53-6d3d-4ed1-b56e-e292fcf0a2dc" 
 meta name="_csrf_header" content="X-CSRF-TOKEN" 

Build script WRLogin2.sh to simulate the Page redirect


#!/bin/bash
#
# Use the Google Dev Tools and select Copy as cURL (bash) to learn quickly about the cURL syntax for an HTML request
#
# Use curl -i to get the response header dumped for this GET request
#       This allows us to track the response cookies
# Usage :
#       This script uses the new  JSESSIONID  returned from login.sh
#       This script returns the csrf_token which can finally be used for all subsequent server connections
#           export csrf_token="X-CSRF-TOKEN: 59bbc7b3-5c50-4220-b609-9506aaa83ea4"
#

url="https://52.58.164.53:8443/securitys!tologin.action"
echo "--------------------------------------------------------------------------------------------------------"
echo "-> URL                      :" $url
echo "-> INPUT Cookie JSESSIONID  :" $cookie
echo "-> OUTPUT                   :" X-CSRF-TOKEN extract from HTML META _csrf
echo "--------------------------------------------------------------------------------------------------------"
# DebugON set (possibly to the empty string) will print out additional trace info
# To enable Debugging run:  $ export DebugON
#
if [ $DebugON ]; then
    echo "DebugON is set, possibly to the empty string"
    set -x
fi
#
# Only use the needed HTTP header fields - most of the HTTP header fields copied from the Google Dev Tools cURL command are not needed
curl_header1='Accept-Language: en-US,en;q=0.8'
curl_header2='Accept-Encoding: gzip, deflate, br'
curl_header3='Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'

#
#  Arrays make this much easier. Note that the $cookie and $csrf_token values contain spaces !
#
args=("-k" "-v" "$url" -H "$cookie"  -H "$csrf_token"  -H "$curl_header1" -H "${curl_header2}"  -H "${curl_header3}")
#
echo "---------------------------------- cURL command to be executed -----------------------------------------"
echo "curl "  "${args[@]}"
echo "--------------------------------------------------------------------------------------------------------"
#
# Execute Curl Command and save stdout and stderr to a Shell variable
output=$( curl "${args[@]}" 2>&1 )
#
# login2.sh returns only a NEW X-CSRF-TOKEN via the response META tag _csrf
# We need to extract the csrf_token from the HTML response meta header and use it for further requests together with the JSESSIONID !
#    
#        

#
# - Rerunning login2.sh may result in the _csrf meta tag being returned 2x.
#    -> Just stop the awk script once we have found the X-CSRF-TOKEN
#   by calling exit.
# - Note: we search for the string _csrf" to limit the lines returned
#

csrf_token=""
csrf_token=$(echo "$output" |  grep '_csrf\" content'  | awk -v FS="(content=\"|\"/>)"  '{print "export csrf_token=\"X-CSRF-TOKEN: "  $2 "\""; exit }' )

http_return=""
http_return=$(echo "$output" |  grep '< HTTP')

if [ -n "$csrf_token" ]; then
        echo "--------------------------------------------------------------------------------------------------------"
        echo "HTTP Return Code: " $http_return
        echo "            Note:  Dont forget to set \$csrf_token variable extracted from HTML Meta Data  !  "
        echo "             Run:  \$" "$csrf_token"
        echo "--------------------------------------------------------------------------------------------------------"
else
        echo "--------------------------------------------------------------------------------------------------------"
        echo "ERROR : X-CSRF-TOKEN not found !"
        echo "HTTP Return Code: " $http_return
        echo "--------------------------------------------------------------------------------------------------------"

fi

Test script WRLogin2.sh to simulate the Page redirect

  • WRlogin2.sh extracts the X-CSRF-TOKEN from HTML Meta Tags
  • Together with the JSESSIONID returned from WRlogin.sh this info is used to access our target website
$ ./WRlogin2.sh
--------------------------------------------------------------------------------------------------------
-> URL                      : https://52.58.164.53:8443/securitys!tologin.action
-> INPUT Cookie JSESSIONID  : Cookie: JSESSIONID=rudqwfhaqx0m1mchkau9ycqzs
-> OUTPUT                   : X-CSRF-TOKEN extract from HTML META _csrf
--------------------------------------------------------------------------------------------------------
---------------------------------- cURL command to be executed -----------------------------------------
curl  -k -v https://52.58.164.53:8443/securitys!tologin.action -H Cookie: JSESSIONID=rudqwfhaqx0m1mchkau9ycqzs -H X-CSRF-TOKEN: c3d70f5d-4388-4d85-a1db-9ac797b92418 -H Accept-Language: en-US,en;q=0.8 -H Accept-Encoding: gzip, deflate, br -H Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
--------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------
HTTP Return Code:  < HTTP/1.1 200 OK
            Note:  Dont forget to set $csrf_token variable extracted from HTML Meta Data  !
             Run:  $ export csrf_token="X-CSRF-TOKEN: 218df123-fa18-44ba-8f3f-2dbc378e987c"
--------------------------------------------------------------------------------------------------------

Step 3: Finally Implement the Page Access using bash/cURL

  • For this script we need: JSESSIONID returned from WRlogin.sh
  • X-CSRF-TOKEN returned from WRlogin2.sh

Follow the steps from above to extract the cURL bash command

$ curl 'https://52.58.164.53:8443/summaryAction!querySummaryInfo.action?nodeSN=0' -X POST -H 'Pragma: no-cache' -H 'Origin: https://52.58.164.53:8443' -H 'Accept-Encoding: gzip, deflate, br' -H 'X-CSRF-TOKEN: 508a5c53-6d3d-4ed1-b56e-e292fcf0a2dc' -H 'Accept-Language: en-US,en;q=0.9' -H 'User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36' -H 'Accept: application/json, text/javascript, */*; q=0.01' -H 'Cache-Control: no-cache' -H 'X-Requested-With: XMLHttpRequest' -H 'Cookie: JSESSIONID=1b0miyons0sfl1uqxsbhdv0zud' -H 'Connection: keep-alive' -H 'Referer: https://52.58.164.53:8443/summaryAction!accessSummaryPage.action?nodeSN=0' -H 'Content-Length: 0' --compressed --insecure
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  1932  100  1932    0     0   1932      0  0:00:01 --:--:--  0:00:01  7263

...... --> Response is JSON encoded

"summaryInfo":"{\"batterySurfaceTemp\":\"\",\"co2reduce\":\"8,062 t\",\"converseNum\":\ ":\"EUR\",
\"currentPower\":\"3,860 kW\",\"dayPower\":\"12,170 kWh\",\"daytotalRadiation\":\"\",\"gridStatus\":false
.....

Build a bash shell script WRData.sh for simulating the WebSite Access

#!/bin/bash
#
# Use the Google Dev Tools and select Copy as cURL (bash) to learn quickly about the cURL syntax for an HTML request
#
# Note: to run this script we need a valid environment - the related export commands should be run after executing the previous scripts
#
#    - JSESSIONID cookie -> login.sh  -> export cookie="Cookie: JSESSIONID=18knsdija6tyjjmej043tac8m"
#    - X-CSRF-TOKEN      -> login2.sh -> export csrf_token='X-CSRF-TOKEN: 1de4c25b-724a-4b96-a187-c72b134a5b9f'
#
#  --> When rerunning this script, both the CSRF token and the JSESSIONID should not change anymore !

url="https://52.58.164.53:8443/summaryAction%21querySummaryInfo.action?nodeSN=0"
echo "--------------------------------------------------------------------------------------------------------"
echo "-> URL                      :" $url
echo "-> INPUT Cookie JSESSIONID  :" $cookie
echo "-> INPUT X-CSRF-TOKEN       :" $csrf_token
echo "-> OUTPUT                   :" HTML Content
echo "--------------------------------------------------------------------------------------------------------"

# DebugON set (possibly to the empty string) will print out additional trace info
if [ $DebugON ]; then
    echo "DebugON is set, possibly to the empty string"
    set -x
fi
#
# Only use the needed HTTP header fields - most of the HTTP header fields copied from the Google Dev Tools cURL command are not needed
curl_header1='Accept-Language: en-US,en;q=0.8'
curl_header2='Accept-Encoding: gzip, deflate, br'
curl_header3='Accept: application/json, text/javascript, */*; q=0.01'
#
#  Arrays make this much easier. Note that the $cookie and $csrf_token values contain spaces !
#
args=("-k" "-v" "-X" "POST" "$url" -H "$cookie"  -H "$csrf_token"  -H "$curl_header1" -H "${curl_header2}"  -H "${curl_header3}")
#
echo "---------------------------------- cURL command to be executed -----------------------------------------"
echo "curl "  "${args[@]}"
echo "--------------------------------------------------------------------------------------------------------"
output=$( curl "${args[@]}" 2>&1 )
#
#set +x
#
# String operation on Output
#    awk -v FS="(currentPower|dayPower)"  '{print $2 }'                            ->  \":\"20,397 kW\",\"
#    sed 's/...$//' --> removes the last 3 characters [ including the 2nd comma ]  ->  \":\"20,397 kW\"
#    | tr -d '\\":' --> removes \": from the remaining string :                    ->  20,397 kW
#
# Note : Don't use kW or kWh as FS for the awk command as this may break awk due to multiple occurrences of these strings
#        in the HTML response !
#
current_power=""
current_power=$(echo "$output" |  grep 'summaryInfo'  | awk -v FS="(currentPower|dayPower)"  '{print $2 }'  | sed 's/...$//' | tr -d '\\":' )

day_power=""
day_power=$(echo "$output" |  grep 'summaryInfo'  | awk -v FS="(dayPower|daytotalRadiation)"  '{print $2 }' |  sed 's/...$//' | tr -d '\\":' )

http_return=""
http_return=$(echo "$output" |  grep '< HTTP')
echo "--------------------------------------------------------------------------------------------------------"
date
echo "HTTP Return Code: " $http_return
echo "Current Power   : " $current_power
echo "    Day Power   : " $day_power
echo "--------------------------------------------------------------------------------------------------------"

Test bash shell script WRData.sh to access our target WebSite

$ ./WRData.sh
--------------------------------------------------------------------------------------------------------
-> URL                      : https://52.58.164.53:8443/summaryAction%21querySummaryInfo.action?nodeSN=0
-> INPUT Cookie JSESSIONID  : Cookie: JSESSIONID=rudqwfhaqx0m1mchkau9ycqzs
-> INPUT X-CSRF-TOKEN       : X-CSRF-TOKEN: 218df123-fa18-44ba-8f3f-2dbc378e987c
-> OUTPUT                   : HTML Content
--------------------------------------------------------------------------------------------------------
---------------------------------- cURL command to be executed -----------------------------------------
curl  -k -v -X POST https://52.58.164.53:8443/summaryAction%21querySummaryInfo.action?nodeSN=0 -H Cookie: JSESSIONID=rudqwfhaqx0m1mchkau9ycqzs -H X-CSRF-TOKEN: 218df123-fa18-44ba-8f3f-2dbc378e987c -H Accept-Language: en-US,en;q=0.8 -H Accept-Encoding: gzip, deflate, br -H Accept: application/json, text/javascript, */*; q=0.01
--------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------
Fr, 17. Nov 2017 10:09:24
HTTP Return Code:  < HTTP/1.1 200 OK
Current Power   :  3,846 kW
    Day Power   :  3,770 kWh
--------------------------------------------------------------------------------------------------------

Final test of our developed Shell Scripts

$ ./WRlogin.sh
--------------------------------------------------------------------------------------------------------
-> URL                      : https://52.58.164.53:8443/security!login.action
-> INPUT Cookie             : Cookie: JSESSIONID=rudqwfhaqx0m1mchkau9ycqzs
-> INPUT Login Credentials  : userName=pvlocal&password=Feli2010&dateTime=1510483708371&veryCode
-> OUTPUT                   : NEW JSESSIONID cookie after successfull Login
--------------------------------------------------------------------------------------------------------
---------------------------------- cURL command to be executed -----------------------------------------
curl -k -v https://52.58.164.53:8443/security!login.action -H Cookie: JSESSIONID=rudqwfhaqx0m1mchkau9ycqzs --data userName=pvlocal&password=Feli2010&dateTime=1510483708371&veryCode -H  -H Accept-Language: en-US,en;q=0.8 -H Accept-Encoding: gzip, deflate, br -H Accept: application/json, text/javascript, */*; q=0.01
--------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------
HTTP Return Code:  < HTTP/1.1 200 OK
Login OK : Javascript Return Status:  op.successfully 
Run New JSESSIONID EXPORT command:  $ export cookie="Cookie: JSESSIONID=slz60pg4qbqp1bxj6gcrfhgdw" 
--------------------------------------------------------------------------------------------------------

$  export cookie="Cookie: JSESSIONID=slz60pg4qbqp1bxj6gcrfhgdw"
$ ./WRlogin2.sh
--------------------------------------------------------------------------------------------------------
-> URL                      : https://52.58.164.53:8443/securitys!tologin.action
-> INPUT Cookie JSESSIONID  : Cookie: JSESSIONID=slz60pg4qbqp1bxj6gcrfhgdw
-> OUTPUT                   : X-CSRF-TOKEN extract from HTML META _csrf
--------------------------------------------------------------------------------------------------------
---------------------------------- cURL command to be executed -----------------------------------------
curl  -k -v https://52.58.164.53:8443/securitys!tologin.action -H Cookie: JSESSIONID=slz60pg4qbqp1bxj6gcrfhgdw -H  -H Accept-Language: en-US,en;q=0.8 -H Accept-Encoding: gzip, deflate, br -H Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
--------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------
HTTP Return Code:  < HTTP/1.1 200 OK
            Note:  Dont forget to set $csrf_token variable extracted from HTML Meta Data  !
             Run:  $  export csrf_token="X-CSRF-TOKEN: 3fa4555c-0890-4806-a7f1-ad0ffcf6fa41" 
--------------------------------------------------------------------------------------------------------

$ export csrf_token="X-CSRF-TOKEN: 3fa4555c-0890-4806-a7f1-ad0ffcf6fa41"
$ ./WRData.sh
--------------------------------------------------------------------------------------------------------
-> URL                      : https://52.58.164.53:8443/summaryAction%21querySummaryInfo.action?nodeSN=0
-> INPUT Cookie JSESSIONID  : Cookie: JSESSIONID=slz60pg4qbqp1bxj6gcrfhgdw
-> INPUT X-CSRF-TOKEN       : X-CSRF-TOKEN: 3fa4555c-0890-4806-a7f1-ad0ffcf6fa41
-> OUTPUT                   : HTML Content
--------------------------------------------------------------------------------------------------------
---------------------------------- cURL command to be executed -----------------------------------------
curl  -k -v -X POST https://52.58.164.53:8443/summaryAction%21querySummaryInfo.action?nodeSN=0 -H Cookie: JSESSIONID=slz60pg4qbqp1bxj6gcrfhgdw -H X-CSRF-TOKEN: 3fa4555c-0890-4806-a7f1-ad0ffcf6fa41 -H Accept-Language: en-US,en;q=0.8 -H Accept-Encoding: gzip, deflate, br -H Accept: application/json, text/javascript, */*; q=0.01
--------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------
Fr, 17. Nov 2017 12:24:10
HTTP Return Code:  < HTTP/1.1 200 OK
Current Power   :  3,949 kW
    Day Power   :  13,890 kWh
--------------------------------------------------------------------------------------------------------

$ ./WRData.sh
--------------------------------------------------------------------------------------------------------
-> URL                      : https://52.58.164.53:8443/summaryAction%21querySummaryInfo.action?nodeSN=0
-> INPUT Cookie JSESSIONID  : Cookie: JSESSIONID=slz60pg4qbqp1bxj6gcrfhgdw
-> INPUT X-CSRF-TOKEN       : X-CSRF-TOKEN: 3fa4555c-0890-4806-a7f1-ad0ffcf6fa41
-> OUTPUT                   : HTML Content
--------------------------------------------------------------------------------------------------------
---------------------------------- cURL command to be executed -----------------------------------------
curl  -k -v -X POST https://52.58.164.53:8443/summaryAction%21querySummaryInfo.action?nodeSN=0 -H Cookie: JSESSIONID=slz60pg4qbqp1bxj6gcrfhgdw -H X-CSRF-TOKEN: 3fa4555c-0890-4806-a7f1-ad0ffcf6fa41 -H Accept-Language: en-US,en;q=0.8 -H Accept-Encoding: gzip, deflate, br -H Accept: application/json, text/javascript, */*; q=0.01
--------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------
Fr, 17. Nov 2017 12:24:16
HTTP Return Code:  < HTTP/1.1 200 OK
Current Power   :  3,949 kW
    Day Power   :  13,890 kWh
--------------------------------------------------------------------------------------------------------

Reference

  • DVWA - Main Login Page - Brute Force HTTP POST Form With CSRF Token

Using GIT

Tutorial and Download Location

GIT Tutorial                    https://www.youtube.com/watch?v=SWYqp7iY_Tc&t=8s
Download Location for Windows   https://git-scm.com/download/win

GIT Concepts & Commands

Concepts: Image git_img1.jpg NOT Found
First Commands: Image git_img2.jpg NOT Found

Using GIT bash and print version and help panel

GIT Bash Window
Image git_img3.jpg NOT Found
  • First Git Commands
  • GIT Functions

Working Directory, Staging Area and Repositories

Image git_overview.jpg NOT Found
  • Above image is copied from: https://sc5.io/posts/how-to-manage-your-wordpress-codebase-with-git/#gref

Init Local Git Repository

Setup Working Directory, Staging Area and Local Repository
Image git_img4.jpg NOT Found

Run Scripts:
  • /d/GIT/gitInit.sh – Init/Reset GIT repository

Init Git Repository:
  • The Local Git Repository is stored in the .git directory
  • To remove the Git Repository run: rm -rf .git
  • Using .gitignore

    Specify intentionally untracked files to be ignored
    Image git_img5.jpg NOT Found Run Scripts:

  • /d/GIT/gitIngnore.sh
  • Mark files/directories to be igrore by GIT operations

  • Adding some files to the Local Repository by running git commit

    Adding 2 files to the working area – Details
    [Image: chapt1_img1.jpg]
    Run Scripts:

  • /d/GIT/gitCommit.sh – Add diffs to our Local GIT repository


  • Step 1

  • Adding 2 files to the staging area
  • Commit the changes – Details
    [Image: chapt1_img2.jpg]
    Step 2: Commit the data

  • After git commit the staging area is cleared (see the command sketch below)
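
    A minimal command sketch of the add/commit cycle described above (the file names are illustrative; the gitCommit.sh script is not reproduced here):

    # Step 1: add two files to the staging area
    git add index.html scripts/test.js
    git status --short      # both files are listed as staged (A)
    # Step 2: commit the staged data to the Local Repository
    git commit -m "Add index.html and scripts/test.js"
    git status --short      # the staging area is empty again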
  • Create a GitHub account and set up a GIT Remote Repository

    Create an empty GitHub Remote Repository
    [Image: git_chapt2_img1.jpg]
    [Image: git_chapt2_img2.jpg]
    [Image: git_chapt2_img3.jpg]
    Run Scripts:

  • /d/GIT/gitRemote.sh
  • Push changes to the Remote GitHub repository

  • GIT Operations tracked by GIT diff command

    Git Operation Diagram – Add. Info
    [Image: git_overview2.jpg]
    Git Storage details

  • Workspace or Working Directory
  • Git Index or Staging Area
  • Local Repository
  • Remote Repository
  • Setup GIT Status
    [Image: git_chapt3_img1.jpg]
    Note:

  • SHA == Secure Hash Algorithm


  • Run Scripts:

  • /d/GIT/gitReset.sh – Reset GIT repository
  • /d/GIT/gitDiffSetup.sh – Add file
  • Git Status Details

  • All outstanding operations are committed
  • Local and Remote Repository are in sync
  • The SHA value for Local and Remote is: d525f1b
  • There are 2 Commits in your Local Repository
  • First Step: Edit File test.js – Verify GIT Status
    [Image: git_chapt3_img2.jpg]

    Run Scripts:

  • /d/GIT/gitDiff.sh – Check for Differences


  • Git Status Details

  • test.js was modified but not added to the Staging Area
  • Run git diff HEAD to check for Diffs between Working Dir and Local Repository
  • Use git add … to add a file to the Staging Area
  • Add test.js to the Staging Area and commit the Transaction – Verify GIT Status
    [Image: git_chapt3_img3.jpg]
    Run Scripts:

  • /d/GIT/gitDiff2.sh – Check for Differences


  • Git Status Details

  • git add scripts/test.js adds the file to our Staging Area
  • Run git diff HEAD to check for Diffs between Working Dir and Local Repository
  • Run git diff --staged to check for Diffs between Staging Area and Local Repository
  • Both diff commands mark the line which we have added (see the sketch below)
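
    As a quick reference, the three diff scopes used above compare different trees (a sketch, not output of the tutorial scripts):

    git diff                # Working Directory vs. Staging Area
    git diff --staged       # Staging Area vs. Local Repository (HEAD)
    git diff HEAD           # Working Directory vs. Local Repository (HEAD)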
  • Using Branches

    Step 1: Create Default Branch with C0,C1,C2 Snapshots – Add. Info
    [Image: git_chapt4_img1.jpg]
    Git Branch Details

  • Master points to C2 Snapshot
    [Image: git_chapt4_img2.jpg]
    Run Scripts:

  • /d/GIT/gitBranchSetup.sh – Initial Script
  • Step 2: Checkout Branch iss53 – Add. Info
    [Image: git_chapt4_img3.jpg]
    Git Branch Details

  • Master points to C2 Snapshot
  • iss53 points to C2 Snapshot
    [Image: git_chapt4_img4.jpg]
    Run Scripts:

  • /d/GIT/gitBranch1.sh – Checkout iss53
  • HEAD points to iss53
  • Step 3: Implement JS code for C3 Snapshot and finally Commit Branch iss53 – Add. Info
    [Image: git_chapt4_img5.jpg]
    Git Branch Details

  • Master points to C2 Snapshot
  • iss53 points to C3 Snapshot
    [Image: git_chapt4_img6.jpg]
    >>> Edit test.js before running script !


    Run Scripts:

  • /d/GIT/gitBranch2.sh – Commit some data an branch iss53
  • HEAD points to iss53
  • Step 4: Checkout master Branch and create new hotfix branch – Add. Info
    [Image: git_chapt4_img7.jpg]
    Run Scripts:

  • /d/GIT/gitBranch3.sh – Create hotfix branch
  • HEAD points to the hotfix branch, which still points to the same commit as master
  • Step 5: Modify test.js, Implement Hotfix on Branch hotfix and Commit the data – Add. Info
    [Image: git_chapt4_img8.jpg]
    Git Branch Details

  • Master points to C2 Snapshot
  • iss53 points to C3 Snapshot
  • hotfix points to C4 Snapshot
    [Image: git_chapt4_img9.jpg]
    >>> Edit test.js before running script !

    Run Scripts:

  • /d/GIT/gitBranch4.sh – Commit some data on branch hotfix
  • HEAD points now to hotfix branch
  • Step 6: Merge master and hotfix Branch – Add. Info
    [Image: git_chapt4_img10.jpg]
    Git Merge Action

  • You’ll notice the phrase “fast-forward” in that merge.
  • C4 pointed to by the branch hotfix was directly ahead of the commit C2
  • Git simply moves the pointer forward.


  • Git Branch Details

  • Master points now to C4 Snapshot
  • iss53 points to C3 Snapshot
  • hotfix points to C4 Snapshot
    [Image: git_chapt4_img11.jpg]
    Run Scripts:

  • /d/GIT/gitBranch5.sh – Merge master and hotfix Branch
  • HEAD points now to master/hotfix branch
  • Step 7: Delete hotfix Branch and continue work on iss53 branch – Add. Info
    [Image: git_chapt4_img12.jpg]
    Run Scripts:

  • /d/GIT/gitBranch6.sh – Delete hotfix Branch and checkout iss53 branch
  • HEAD points now to iss53 branch
  • Step 8: Implement JS code for C5 Snapshot and finally Commit Branch iss53 – Add. Info
    [Image: git_chapt4_img13.jpg]
    Git Branch Details

  • Master points to C4 Snapshot
  • iss53 points to C5 Snapshot
    [Image: git_chapt4_img14.jpg]
    >>> Edit test.js before running script !


    Run Scripts:

  • /d/GIT/gitBranch7.sh – Commit data on branch iss53
  • HEAD points to iss53
  • Step 9: Try to merge iss53 branch and master branch – Add. Info
    [Image: git_chapt4_img18.jpg]
    Git Merge Details

  • Master points to C4 Snapshot
  • iss53 points to C5 Snapshot
    [Image: git_chapt4_img15.jpg]
    >>> NOTE: Merge fails !!


    Run Scripts:

  • /d/GIT/gitBranch8.sh – Merge branches
  • HEAD points to master
  • Step 10: Display Diffs between common Ancestor C2 and iss53 branch and master branch – Add. Info
    [Image: git_chapt4_img17.jpg]
    Diffs between Ancestor C2/C4 and C2/C5 Snapshot

  • C2 is common ancestor for C4 and C5 Snapshot
  • Running Git Mergetool

    Step 11: Run git mergetool and fix Merge Conflict
    [Image: git_chapt4_mergetool.jpg]
    >>> Merge tool

  • Start GIT mergetool by running: $ git mergetool
  • Left window shows the file test.js for the master branch at Snapshot C4
  • Window in the middle shows the file test.js at Snapshot C2
  • Right window shows the file test.js for the iss53 branch at Snapshot C5


  • >>> Merge Conflict Resolution

  • master and iss53 show no real conflict
  • Remove (comment out) the conflict markers written by the mergetool
  • Finally leave the mergetool with vim command :wqa

  • Commit the data after MERGE request

    Step 12: Commit the Changes done by GIT mergetool – Add. Info
    [Image: git_chapt4_img19.jpg]
    Git Branch Details

  • Master points to C6 Snapshot
  • iss53 points to C5 Snapshot
    [Image: git_chapt4_img20.jpg]
    >>> NOTE: test.js was corrected in step 11 by using GIT mergetool !


    Run Scripts:

  • /d/GIT/gitBranch9.sh – Commit data after MERGE request
  • HEAD points now to master branch
  • Branch iss53 can NOW be deleted ! (A compact command-line sketch of the whole branch walkthrough follows below.)
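
    The /d/GIT/gitBranch*.sh scripts are not reproduced on this page; a compact sketch of the plain git commands behind Steps 2-12, assuming a repository that already holds the C0-C2 commits on master (commit messages are illustrative, test.js as in the tutorial):

    git checkout -b iss53                  # Step 2: create and check out branch iss53
    echo "// C3 work" >> test.js
    git commit -am "iss53: C3 snapshot"    # Step 3
    git checkout master                    # Step 4: back to master ...
    git checkout -b hotfix                 # ... and create the hotfix branch
    echo "// C4 hotfix" >> test.js
    git commit -am "hotfix: C4 snapshot"   # Step 5
    git checkout master                    # Step 6: fast-forward merge
    git merge hotfix                       # prints "Fast-forward"
    git branch -d hotfix                   # Step 7: hotfix is no longer needed
    git checkout iss53                     # ... continue on iss53
    echo "// C5 work" >> test.js
    git commit -am "iss53: C5 snapshot"    # Step 8
    git checkout master
    git merge iss53                        # Step 9: reports a merge conflict in test.js
    git mergetool                          # Step 11: resolve the conflict
    git commit -m "Merge iss53 (C6)"       # Step 12: the merge commit
    git branch -d iss53                    # iss53 can now be deleted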
  • Rename a remote branch

    
    Rename the local branch
    $ git branch --move STUV-184_TDB STUV-188
    
    
    Save the new branch to origin
    $ git push origin --set-upstream STUV-188
    
    warning: redirecting to https://git.informatik.fh-nuernberg.de/study-monitor/pythia-the-counselor.git/
    Total 0 (delta 0), reused 0 (delta 0)
    remote:
    remote: To create a merge request for STUV-188, visit:
    remote:   https://git.informatik.fh-nuernberg.de/study-monitor/pythia-the-counselor/merge_requests/new?merge_request%5Bsource_branch%5D=STUV-188
    remote:
    To https://git.informatik.fh-nuernberg.de/study-monitor/pythia-the-counselor
     * [new branch]      STUV-188 -> STUV-188
    Branch 'STUV-188' set up to track remote branch 'STUV-188' from 'origin'.
    
    
    Delete the old branch 
    $ git push origin --delete STUV-184_TDB
    
    warning: redirecting to https://git.informatik.fh-nuernberg.de/study-monitor/pythia-the-counselor.git/
    To https://git.informatik.fh-nuernberg.de/study-monitor/pythia-the-counselor
     - [deleted]         STUV-184_TDB
    
    

    Add current changes to a NEW Branch

    Validate Status: 
      git status
      On branch develop
      Your branch is ahead of 'origin/develop' by 1 commit.
       (use "git push" to publish your local commits)
    
      Changes not staged for commit:
        (use "git add ..." to update what will be committed)
        (use "git checkout -- ..." to discard changes in working directory)
    
            modified:   application/src/main/java/de/thnuernberg/in/stuv/pythia/application/config/ConfigurationService.java
    
    Add changes to new Branch 
      git branch STUV-330
      git checkout STUV-330
    
    Validate Status: 
    On branch STUV-330
    Changes not staged for commit:
      (use "git add ..." to update what will be committed)
      (use "git checkout -- ..." to discard changes in working directory)
    
            modified:   application/src/main/java/de/thnuernberg/in/stuv/pythia/application/config/ConfigurationService.java
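
    On newer git versions the two commands above can be combined into one step; a minimal sketch (branch name as in the example, uncommitted changes are carried over to the new branch):

    git checkout -b STUV-330        # create the branch and switch to it
    # or, with git >= 2.23
    git switch -c STUV-330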
    
    

    Reference

    Install 12.2 Oracle Member Cluster in a Virtualbox env

    This article only exists because I’m always getting support, fast feedback and motivation from

    Anil Nair | Product Manager
    Oracle Real Application Clusters (RAC)

    Verify RHP-Server, IO-Server and MGMTDB status on our Domain Services Cluster

    [grid@dsctw21 ~]$ srvctl status rhpserver
    Rapid Home Provisioning Server is enabled
    Rapid Home Provisioning Server is running on node dsctw21
    [grid@dsctw21 ~]$  srvctl status  mgmtdb 
    Database is enabled
    Instance -MGMTDB is running on node dsctw21
    [grid@dsctw21 ~]$ srvctl status ioserver
    ASM I/O Server is running on dsctw21
    

      Prepare RHP Server

    DNS requirements for HAVIP IP address 
    [grid@dsctw21 ~]$  nslookup rhpserver
    Server:        192.168.5.50
    Address:    192.168.5.50#53
    
    Name:    rhpserver.example.com
    Address: 192.168.5.51
    
    [grid@dsctw21 ~]$  nslookup  192.168.5.51
    Server:        192.168.5.50
    Address:    192.168.5.50#53
    
    51.5.168.192.in-addr.arpa    name = rhpserver.example.com.
    
    [grid@dsctw21 ~]$ ping nslookup rhpserver
    ping: nslookup: Name or service not known
    [grid@dsctw21 ~]$ ping rhpserver
    PING rhpserver.example.com (192.168.5.51) 56(84) bytes of data.
    From dsctw21.example.com (192.168.5.151) icmp_seq=1 Destination Host Unreachable
    From dsctw21.example.com (192.168.5.151) icmp_seq=2 Destination Host Unreachable
    
    -> nslookup works - Nobody should respond to our ping request  as HAVIP is not active YET 
    
    As user root create a HAVIP  
    [root@dsctw21 ~]#  srvctl add havip -id rhphavip -address rhpserver 
    
    *****  Cluster Resources: *****
    Resource NAME               INST   TARGET       STATE        SERVER          STATE_DETAILS
    --------------------------- ----   ------------ ------------ --------------- -----------------------------------------
    ora.rhphavip.havip             1   OFFLINE      OFFLINE      -               STABLE  
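
    The HAVIP resource stays OFFLINE until it is started; a hedged sketch of starting and checking it (resource id rhphavip as created above, exact syntax may vary with the srvctl version):

    # as root or grid on the DSC
    srvctl start havip -id rhphavip
    srvctl status havip -id rhphavip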
    

    Create a Member Cluster Configuration Manifest

    [grid@dsctw21 ~]$ crsctl create  -h
    Usage:
      crsctl create policyset -file <filePath>
    where 
         filePath        Policy set file to create.
    
      crsctl create member_cluster_configuration <member_cluster_name> -file <cluster_manifest_file>  -member_type <database|application>  [-version <member_cluster_version>] [-domain_services [asm_storage <local|direct|indirect>][<rhp>]]
      where 
         member_cluster_name    name of the new Member Cluster
         -file                  path of the Cluster Manifest File (including the '.xml' extension) to be created
         -member_type           type of member cluster to be created
         -version               5 digit version of GI (example: 12.2.0.2.0) on the new Member Cluster, if
                                different from the Domain Services Cluster
         -domain_services       services to be initially configured for this member
                                cluster (asm_storage with local, direct, or indirect access paths, and rhp)
                                --note that if "-domain_services" option is not specified,
                                then only the GIMR and TFA services will be configured
         asm_storage            indicates the storage access path for the database member clusters
                                local : storage is local to the cluster
                                direct or indirect : direct or indirect access to storage provided on the Domain Services Cluster
         rhp                    generate credentials and configuration for an RHP client cluster.
    
    Provide access to DSC Data DG - even though we use: asm_storage local
    [grid@dsctw21 ~]$ sqlplus / as sysasm
    SQL> ALTER DISKGROUP data SET ATTRIBUTE 'access_control.enabled' = 'true';
    Diskgroup altered.
    
    Create a Member Cluster Configuration File (the command below uses asm_storage indirect)
    
    [grid@dsctw21 ~]$ crsctl create member_cluster_configuration mclu2 -file mclu2.xml  -member_type database -domain_services asm_storage indirect 
    --------------------------------------------------------------------------------
    ASM GIMR TFA ACFS RHP GNS
    ================================================================================
    YES  YES  NO   NO  NO YES
    ================================================================================
    
    If you get ORA-15365 during crsctl create member_cluster_configuration delete the configuration first
     Error ORA-15365: member cluster 'mclu2' already configured
       [grid@dsctw21 ~]$ crsctl delete member_cluster_configuration mclu2
    
    
    [grid@dsctw21 ~]$ crsctl query  member_cluster_configuration mclu2 
              mclu2     12.2.0.1.0 a6ab259d51ea6f91ffa7984299059208 ASM,GIMR
    
    Copy the File to the Member Cluster Host where you plan to start the installation
    [grid@dsctw21 ~]$ sum  mclu2.xml
    54062    22
    
    Copy Member Cluster Manifest File to Member Cluster host
    [grid@dsctw21 ~]$ scp  mclu2.xml mclu21:
    mclu2.xml                                                                                         100%   25KB  24.7KB/s   00:00  
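
    Optionally compare the checksum on the Member Cluster host to make sure the manifest arrived intact (a small sketch, host name mclu21 as above):

    sum mclu2.xml               # on the DSC node dsctw21
    ssh mclu21 sum mclu2.xml    # on the Member Cluster host - both sums must match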
    

    Verify DSC SCAN Address from our Member Cluster Hosts

    [grid@mclu21 grid]$ ping dsctw-scan.dsctw.dscgrid.example.com
    PING dsctw-scan.dsctw.dscgrid.example.com (192.168.5.232) 56(84) bytes of data.
    64 bytes from 192.168.5.232 (192.168.5.232): icmp_seq=1 ttl=64 time=0.570 ms
    64 bytes from 192.168.5.232 (192.168.5.232): icmp_seq=2 ttl=64 time=0.324 ms
    64 bytes from 192.168.5.232 (192.168.5.232): icmp_seq=3 ttl=64 time=0.654 ms
    ^C
    --- dsctw-scan.dsctw.dscgrid.example.com ping statistics ---
    3 packets transmitted, 3 received, 0% packet loss, time 2001ms
    rtt min/avg/max/mdev = 0.324/0.516/0.654/0.140 ms
    
    
    [root@mclu21 ~]# nslookup dsctw-scan.dsctw.dscgrid.example.com
    Server:        192.168.5.50
    Address:    192.168.5.50#53
    
    Non-authoritative answer:
    Name:    dsctw-scan.dsctw.dscgrid.example.com
    Address: 192.168.5.230
    Name:    dsctw-scan.dsctw.dscgrid.example.com
    Address: 192.168.5.226
    Name:    dsctw-scan.dsctw.dscgrid.example.com
    Address: 192.168.5.227

    Start Member Cluster installation

    Unset the ORACLE_BASE environment variable.
    [grid@dsctw21 grid]$ unset ORACLE_BASE
    [grid@dsctw21 ~]$ cd $GRID_HOME
    [grid@dsctw21 grid]$ pwd
    /u01/app/122/grid
    [grid@dsctw21 grid]$ unzip -q  /media/sf_kits/Oracle/122/linuxx64_12201_grid_home.zip
    
    [grid@mclu21 grid]$ gridSetup.sh
    Launching Oracle Grid Infrastructure Setup Wizard...
    
    -> Configure an Oracle Member Cluster for Oracle Database
     -> Member Cluster Manifest File : /home/grid/FILES/mclu2.xml
    
    While parsing the Member Cluster Manifest File, the following error pops up:
    
    [INS-30211] An unexpected exception occurred while extracting details from ASM client data
    
    PRCI-1167 : failed to extract atttributes from the specified file "/home/grid/FILES/mclu2.xml"
    PRCT-1453 : failed to get ASM properties from ASM client data file /home/grid/FILES/mclu2.xml
    KFOD-00321: failed to read the credential file /home/grid/FILES/mclu2.xml
    

    • At your DSC: Add GNS client Data to   Member Cluster Configuration File
    [grid@dsctw21 ~]$ srvctl export gns -clientdata   mclu2.xml   -role CLIENT
    [grid@dsctw21 ~]$ scp  mclu2.xml mclu21: mclu2.xml                          100%   25KB  24.7KB/s   00:00
    
    
    •  Restart the Member Cluster Installation – should work NOW !

     

    • Our Windows 7 Host is busy and shows high memory consumption
    • The GIMR is the most challenging part of the Installation

    Verify Member Cluster

    Verify Member Cluster Resources 
    
    Cluster Resources 
    [root@mclu22 ~]# crs
    *****  Local Resources: *****
    Resource NAME                  TARGET     STATE           SERVER       STATE_DETAILS                       
    -------------------------      ---------- ----------      ------------ ------------------                  
    ora.LISTENER.lsnr              ONLINE     ONLINE          mclu21       STABLE   
    ora.LISTENER.lsnr              ONLINE     ONLINE          mclu22       STABLE   
    ora.net1.network               ONLINE     ONLINE          mclu21       STABLE   
    ora.net1.network               ONLINE     ONLINE          mclu22       STABLE   
    ora.ons                        ONLINE     ONLINE          mclu21       STABLE   
    ora.ons                        ONLINE     ONLINE          mclu22       STABLE   
    *****  Cluster Resources: *****
    Resource NAME               INST   TARGET       STATE        SERVER          STATE_DETAILS
    --------------------------- ----   ------------ ------------ --------------- -----------------------------------------
    ora.LISTENER_SCAN1.lsnr        1   ONLINE       ONLINE       mclu22          STABLE  
    ora.LISTENER_SCAN2.lsnr        1   ONLINE       ONLINE       mclu21          STABLE  
    ora.LISTENER_SCAN3.lsnr        1   ONLINE       ONLINE       mclu21          STABLE  
    ora.cvu                        1   ONLINE       ONLINE       mclu21          STABLE  
    ora.mclu21.vip                 1   ONLINE       ONLINE       mclu21          STABLE  
    ora.mclu22.vip                 1   ONLINE       ONLINE       mclu22          STABLE  
    ora.qosmserver                 1   ONLINE       ONLINE       mclu21          STABLE  
    ora.scan1.vip                  1   ONLINE       ONLINE       mclu22          STABLE  
    ora.scan2.vip                  1   ONLINE       ONLINE       mclu21          STABLE  
    ora.scan3.vip                  1   ONLINE       ONLINE       mclu21          STABLE  
    
    [root@mclu22 ~]#  srvctl config scan 
    SCAN name: mclu2-scan.mclu2.dscgrid.example.com, Network: 1
    Subnet IPv4: 192.168.5.0/255.255.255.0/enp0s8, dhcp
    Subnet IPv6: 
    SCAN 1 IPv4 VIP: -/scan1-vip/192.168.5.202
    SCAN VIP is enabled.
    SCAN VIP is individually enabled on nodes: 
    SCAN VIP is individually disabled on nodes: 
    SCAN 2 IPv4 VIP: -/scan2-vip/192.168.5.231
    SCAN VIP is enabled.
    SCAN VIP is individually enabled on nodes: 
    SCAN VIP is individually disabled on nodes: 
    SCAN 3 IPv4 VIP: -/scan3-vip/192.168.5.232
    SCAN VIP is enabled.
    SCAN VIP is individually enabled on nodes: 
    SCAN VIP is individually disabled on nodes: 
    
    [root@mclu22 ~]#  nslookup  mclu2-scan.mclu2.dscgrid.example.com
    Server:        192.168.5.50
    Address:    192.168.5.50#53
    Non-authoritative answer:
    Name:    mclu2-scan.mclu2.dscgrid.example.com
    Address: 192.168.5.232
    Name:    mclu2-scan.mclu2.dscgrid.example.com
    Address: 192.168.5.202
    Name:    mclu2-scan.mclu2.dscgrid.example.com
    Address: 192.168.5.231
    
    [root@mclu22 ~]# ping mclu2-scan.mclu2.dscgrid.example.com
    PING mclu2-scan.mclu2.dscgrid.example.com (192.168.5.202) 56(84) bytes of data.
    64 bytes from mclu22.example.com (192.168.5.202): icmp_seq=1 ttl=64 time=0.067 ms
    64 bytes from mclu22.example.com (192.168.5.202): icmp_seq=2 ttl=64 time=0.037 ms
    ^C
    --- mclu2-scan.mclu2.dscgrid.example.com ping statistics ---
    2 packets transmitted, 2 received, 0% packet loss, time 1001ms
    rtt min/avg/max/mdev = 0.037/0.052/0.067/0.015 ms
    
    
    [grid@mclu21 ~]$  oclumon manage -get MASTER
    Master = mclu21
    
    [grid@mclu21 ~]$  oclumon manage -get reppath
    CHM Repository Path = +MGMT/_MGMTDB/50472078CF4019AEE0539705A8C0D652/DATAFILE/sysmgmtdata.292.944846507
    
    [grid@mclu21 ~]$  oclumon dumpnodeview -allnodes
    ----------------------------------------
    Node: mclu21 Clock: '2017-05-24 17.51.50+0200' SerialNo:445 
    ----------------------------------------
    SYSTEM:
    #pcpus: 1 #cores: 1 #vcpus: 1 cpuht: N chipname: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz cpuusage: 46.68 cpusystem: 5.80 cpuuser: 40.87 cpunice: 0.00 cpuiowait: 0.00 cpusteal: 0.00 cpuq: 1 physmemfree: 1047400 physmemtotal: 7910784 mcache: 4806576 swapfree: 8257532 swaptotal: 8257532 hugepagetotal: 0 hugepagefree: 0 hugepagesize: 2048 ior: 0 iow: 41 ios: 10 swpin: 0 swpout: 0 pgin: 0 pgout: 20 netr: 81.940 netw: 85.211 procs: 248 procsoncpu: 1 #procs_blocked: 0 rtprocs: 7 rtprocsoncpu: N/A #fds: 10400 #sysfdlimit: 6815744 #disks: 5 #nics: 3 loadavg1: 6.92 loadavg5: 7.16 loadavg15: 5.56 nicErrors: 0
    
    TOP CONSUMERS:
    topcpu: 'gdb(20156) 31.19' topprivmem: 'gdb(20159) 353188' topshm: 'gdb(20159) 151624' topfd: 'crsd(21898) 274' topthread: 'crsd(21898) 52'
    ....
    
    [root@mclu22 ~]#  tfactl print status
    .-----------------------------------------------------------------------------------------------.
    | Host   | Status of TFA | PID  | Port | Version    | Build ID             | Inventory Status   |
    +--------+---------------+------+------+------------+----------------------+--------------------+
    | mclu22 | RUNNING       | 2437 | 5000 | 12.2.1.0.0 | 12210020161122170355 | COMPLETE           |
    | mclu21 | RUNNING       | 1209 | 5000 | 12.2.1.0.0 | 12210020161122170355 | COMPLETE           |
    '--------+---------------+------+------+------------+----------------------+--------------------'
    

    Verify DSC status after Member Cluster Setup

    
    SQL> @pdb_info.sql
    SQL> /*
    SQL>          To connect to GIMR database set ORACLE_SID : export  ORACLE_SID=\-MGMTDB
    SQL> */
    SQL> 
    SQL> set linesize 132
    SQL> COLUMN NAME FORMAT A18
    SQL> SELECT NAME, CON_ID, DBID, CON_UID, GUID FROM V$CONTAINERS ORDER BY CON_ID;
    
    NAME               CON_ID        DBID    CON_UID GUID
    ------------------ ---------- ---------- ---------- --------------------------------
    CDB$ROOT            1 1149111082      1 4700AA69A9553E5FE05387E5E50AC8DA
    PDB$SEED            2  949396570  949396570 50458CC0190428B2E0539705A8C047D8
    GIMR_DSCREP_10            3 3606966590 3606966590 504599D57F9148C0E0539705A8C0AD8D
    GIMR_CLUREP_20            4 2292678490 2292678490 50472078CF4019AEE0539705A8C0D652
    
    --> Management Database hosts a new PDB named GIMR_CLUREP_20
    
    SQL> 
    SQL> !asmcmd  find /DATA/mclu2 \*
    +DATA/mclu2/OCRFILE/
    +DATA/mclu2/OCRFILE/REGISTRY.257.944845929
    +DATA/mclu2/VOTINGFILE/
    +DATA/mclu2/VOTINGFILE/vfile.258.944845949
    
    SQL> !asmcmd find \--type VOTINGFILE / \*
    +DATA/mclu2/VOTINGFILE/vfile.258.944845949
    
    SQL> !asmcmd find \--type   OCRFILE / \*
    +DATA/dsctw/OCRFILE/REGISTRY.255.944835699
    +DATA/mclu2/OCRFILE/REGISTRY.257.944845929
    
    SQL> ! crsctl query css votedisk
    ##  STATE    File Universal Id                File Name Disk group
    --  -----    -----------------                --------- ---------
     1. ONLINE   6e59072e99f34f66bf750a5c8daf616f (AFD:DATA1) [DATA]
     2. ONLINE   ef0d610cb44d4f2cbf9d977090b88c2c (AFD:DATA2) [DATA]
     3. ONLINE   db3f3572250c4f74bf969c7dbaadfd00 (AFD:DATA3) [DATA]
    Located 3 voting disk(s).
    
    SQL> ! crsctl get cluster mode status
    Cluster is running in "flex" mode
    
    SQL> ! crsctl get cluster class
    CRS-41008: Cluster class is 'Domain Services Cluster'
    
    SQL> ! crsctl get cluster name
    CRS-6724: Current cluster name is 'dsctw'

    Potential Errors during Member Cluster Setup

       1. Reading Member Cluster Configuration File fails with  
           [INS-30211] An unexpected exception occurred while extracting details from ASM client data
           PRCI-1167 : failed to extract atttributes from the specified file "/home/grid/FILES/mclu2.xml"
           PRCT-1453 : failed to get ASM properties from ASM client data file /home/grid/FILES/mclu2.xml
           KFOD-00319: No ASM instance available for OCI connection
          Fix : Add GNS client Data to   Member Cluster Configuration File
                $ srvctl export gns -clientdata   mclu2.xml   -role CLIENT
                -> Fix confirmed 
    
       2. Reading Member Cluster Configuration File fails with  
        [INS-30211] An unexpected exception occurred while extracting details from ASM client data
           PRCI-1167 : failed to extract atttributes from the specified file "/home/grid/FILES/mclu2.xml"
           PRCT-1453 : failed to get ASM properties from ASM client data file /home/grid/FILES/mclu2.xml
           KFOD-00321: failed to read the credential file /home/grid/FILES/mclu2.xml 
           -> Double check that the DSC ASM Configuration is working
          This error may be related to running 
          [grid@dsctw21 grid]$ /u01/app/122/grid/gridSetup.sh -executeConfigTools -responseFile /home/grid/grid_dsctw2.rsp  
          and not setting passwords in the related rsp File  
         # Password for SYS user of Oracle ASM
        oracle.install.asm.SYSASMPassword=sys
        # Password for ASMSNMP account
        oracle.install.asm.monitorPassword=sys
          Fix: Add passwords before running   -executeConfigTools step
               -> Fix NOT confirmed  
      
   3. Crashes due to limited memory in my Virtualbox env (32 GByte)
   3.1  Crash of DSC [ Virtualbox host freezes - could not track VM via top ]
        A failed Cluster Member Setup due to memory shortage can kill your DSC GNS
        Note: This is a very dangerous situation as it kills your DSC env. 
              As said, always back up the OCR and export GNS (see the sketch after this list) !
       3.2  Crash of any or all Member Cluster [ Virtualbox host freezes - could not track VM via top ]
            - GIMR database setup is partially installed but not working 
            - Member cluster itself is working fine
    
    

    Member Cluster Deinstall

    On all Member Cluster Nodes but NOT the last one :
    [root@mclu21 grid]#  $GRID_HOME/crs/install/rootcrs.sh -deconfig -force 
    On last Member Cluster Node:
    [root@mclu21 grid]#  $GRID_HOME/crs/install/rootcrs.sh -deconfig -force -lastnode
    ..
    2017/05/25 14:37:18 CLSRSC-559: Ensure that the GPnP profile data under the 'gpnp' directory in /u01/app/122/grid is deleted on each node before using the software in the current Grid Infrastructure home for reconfiguration.
    2017/05/25 14:37:18 CLSRSC-590: Ensure that the configuration for this Storage Client (mclu2) is deleted by running the command 'crsctl delete member_cluster_configuration <member_cluster_name>' on the Storage Server.
    
    Delete Member Cluster mclu2 - Commands running on DSC
    
    [grid@dsctw21 ~]$ crsctl delete  member_cluster_configuration mclu2 
    ASMCMD-9477: delete member cluster 'mclu2' failed
    KFOD-00327: failed to delete member cluster 'mclu2'
    ORA-15366: unable to delete configuration for member cluster 'mclu2' because the directory '+DATA/mclu2/VOTINGFILE' was not empty
    ORA-06512: at line 4
    ORA-06512: at "SYS.X$DBMS_DISKGROUP", line 724
    ORA-06512: at line 2
    
    ASMCMD> find mclu2/ *
    +DATA/mclu2/VOTINGFILE/
    +DATA/mclu2/VOTINGFILE/vfile.258.944845949
    ASMCMD> rm +DATA/mclu2/VOTINGFILE/vfile.258.94484594
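    
    After removing the leftover voting file, the delete was presumably re-run (this step is not shown in the log above); a sketch:
    
    [grid@dsctw21 ~]$ crsctl delete member_cluster_configuration mclu2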
    
    SQL>    @pdb_info
    NAME               CON_ID        DBID    CON_UID GUID
    ------------------ ---------- ---------- ---------- --------------------------------
    CDB$ROOT            1 1149111082      1 4700AA69A9553E5FE05387E5E50AC8DA
    PDB$SEED            2  949396570  949396570 50458CC0190428B2E0539705A8C047D8
    GIMR_DSCREP_10            3 3606966590 3606966590 504599D57F9148C0E0539705A8C0AD8D
    
    -> GIMR_CLUREP_20 PDB was deleted !
    
    [grid@dsctw21 ~]$ srvctl config gns -list
    dsctw21.CLSFRAMEdsctw SRV Target: 192.168.2.151.dsctw Protocol: tcp Port: 40020 Weight: 0 Priority: 0 Flags: 0x101
    dsctw21.CLSFRAMEdsctw TXT NODE_ROLE="HUB", NODE_INCARNATION="0", NODE_TYPE="20" Flags: 0x101
    dsctw22.CLSFRAMEdsctw SRV Target: 192.168.2.152.dsctw Protocol: tcp Port: 58466 Weight: 0 Priority: 0 Flags: 0x101
    dsctw22.CLSFRAMEdsctw TXT NODE_ROLE="HUB", NODE_INCARNATION="0", NODE_TYPE="20" Flags: 0x101
    mclu21.CLSFRAMEmclu2 SRV Target: 192.168.2.155.mclu2 Protocol: tcp Port: 14064 Weight: 0 Priority: 0 Flags: 0x101
    mclu21.CLSFRAMEmclu2 TXT NODE_ROLE="HUB", NODE_INCARNATION="0", NODE_TYPE="20" Flags: 0x101
    dscgrid.example.com DLV 20682 10 18 ( XoH6wdB6FkuM3qxr/ofncb0kpYVCa+hTubyn5B4PNgJzWF4kmbvPdN2CkEcCRBxt10x/YV8MLXEe0emM26OCAw== ) Unique Flags: 0x314
    dscgrid.example.com DNSKEY 7 3 10 ( MIIBCgKCAQEAvu/8JsrxQAVTEPjq4+JfqPwewH/dc7Y/QbJfMp9wgIwRQMZyJSBSZSPdlqhw8fSGfNUmWJW8v+mJ4JsPmtFZRsUW4iB7XvO2SwnEuDnk/8W3vN6sooTmH82x8QxkOVjzWfhqJPLkGs9NP4791JEs0wI/HnXBoR4Xv56mzaPhFZ6vM2aJGWG0N/1i67cMOKIDpw90JV4HZKcaWeMsr57tOWqEec5+dhIKf07DJlCqa4UU/oSHH865DBzpqqEhfbGaUAiUeeJVVYVJrWFPhSttbxsdPdCcR9ulBLuR6PhekMj75wxiC8KUgAL7PUJjxkvyk3ugv5K73qkbPesNZf6pEQIDAQAB ) Unique Flags: 0x314
    dscgrid.example.com NSEC3PARAM 10 0 2 ( jvm6kO+qyv65ztXFy53Dkw== ) Unique Flags: 0x314
    dsctw-scan.dsctw A 192.168.5.226 Unique Flags: 0x81
    dsctw-scan.dsctw A 192.168.5.235 Unique Flags: 0x81
    dsctw-scan.dsctw A 192.168.5.238 Unique Flags: 0x81
    dsctw-scan1-vip.dsctw A 192.168.5.238 Unique Flags: 0x81
    dsctw-scan2-vip.dsctw A 192.168.5.235 Unique Flags: 0x81
    dsctw-scan3-vip.dsctw A 192.168.5.226 Unique Flags: 0x81
    dsctw21-vip.dsctw A 192.168.5.225 Unique Flags: 0x81
    dsctw22-vip.dsctw A 192.168.5.241 Unique Flags: 0x81
    dsctw-scan1-vip A 192.168.5.238 Unique Flags: 0x81
    dsctw-scan2-vip A 192.168.5.235 Unique Flags: 0x81
    dsctw-scan3-vip A 192.168.5.226 Unique Flags: 0x81
    dsctw21-vip A 192.168.5.225 Unique Flags: 0x81
    dsctw22-vip A 192.168.5.241 Unique Flags: 0x81
    dsctw21.gipcdhaname SRV Target: 192.168.2.151.dsctw Protocol: tcp Port: 41795 Weight: 0 Priority: 0 Flags: 0x101
    dsctw21.gipcdhaname TXT NODE_ROLE="HUB", NODE_INCARNATION="0", NODE_TYPE="20" Flags: 0x101
    dsctw22.gipcdhaname SRV Target: 192.168.2.152.dsctw Protocol: tcp Port: 61595 Weight: 0 Priority: 0 Flags: 0x101
    dsctw22.gipcdhaname TXT NODE_ROLE="HUB", NODE_INCARNATION="0", NODE_TYPE="20" Flags: 0x101
    mclu21.gipcdhaname SRV Target: 192.168.2.155.mclu2 Protocol: tcp Port: 31416 Weight: 0 Priority: 0 Flags: 0x101
    mclu21.gipcdhaname TXT NODE_ROLE="HUB", NODE_INCARNATION="0", NODE_TYPE="20" Flags: 0x101
    gpnpd h:dsctw21 c:dsctw u:c5323627b2484f8fbf20e67a2c4624e1.gpnpa2c4624e1 SRV Target: dsctw21.dsctw Protocol: tcp Port: 21099 Weight: 0 Priority: 0 Flags: 0x101
    gpnpd h:dsctw21 c:dsctw u:c5323627b2484f8fbf20e67a2c4624e1.gpnpa2c4624e1 TXT agent="gpnpd", cname="dsctw", guid="c5323627b2484f8fbf20e67a2c4624e1", host="dsctw21", pid="12420" Flags: 0x101
    gpnpd h:dsctw22 c:dsctw u:c5323627b2484f8fbf20e67a2c4624e1.gpnpa2c4624e1 SRV Target: dsctw22.dsctw Protocol: tcp Port: 60348 Weight: 0 Priority: 0 Flags: 0x101
    gpnpd h:dsctw22 c:dsctw u:c5323627b2484f8fbf20e67a2c4624e1.gpnpa2c4624e1 TXT agent="gpnpd", cname="dsctw", guid="c5323627b2484f8fbf20e67a2c4624e1", host="dsctw22", pid="13141" Flags: 0x101
    CSSHub1.hubCSS SRV Target: dsctw21.dsctw Protocol: gipc Port: 0 Weight: 0 Priority: 0 Flags: 0x101
    CSSHub1.hubCSS TXT HOSTQUAL="dsctw" Flags: 0x101
    Net-X-1.oraAsm SRV Target: 192.168.2.151.dsctw Protocol: tcp Port: 1526 Weight: 0 Priority: 0 Flags: 0x101
    Net-X-2.oraAsm SRV Target: 192.168.2.152.dsctw Protocol: tcp Port: 1526 Weight: 0 Priority: 0 Flags: 0x101
    Oracle-GNS A 192.168.5.60 Unique Flags: 0x315
    dsctw.Oracle-GNS SRV Target: Oracle-GNS Protocol: tcp Port: 14123 Weight: 0 Priority: 0 Flags: 0x315
    dsctw.Oracle-GNS TXT CLUSTER_NAME="dsctw", CLUSTER_GUID="c5323627b2484f8fbf20e67a2c4624e1", NODE_NAME="dsctw21", SERVER_STATE="RUNNING", VERSION="12.2.0.0.0", PROTOCOL_VERSION="0xc200000", DOMAIN="dscgrid.example.com" Flags: 0x315
    Oracle-GNS-ZM A 192.168.5.60 Unique Flags: 0x315
    dsctw.Oracle-GNS-ZM SRV Target: Oracle-GNS-ZM Protocol: tcp Port: 39923 Weight: 0 Priority: 0 Flags: 0x315
    
    --> Most GNS entries for our Member cluster were deleted

    Re-Executing GRID setup fails with [FATAL] [INS-30024]

     

    After an unclean deinstallation gridSetup.sh fails with the error [FATAL] [INS-30024].
    Instead of offering the option to install a NEW cluster, the installer offers the GRID Upgrade option.

    Debugging with strace

    [grid@dsctw21 grid]$   gridSetup.sh -silent  -skipPrereqs -responseFile  /home/grid/grid_dsctw2.rsp    oracle.install.asm.SYSASMPassword=sys    oracle.install.asm.monitorPassword=sys 2>llog2
    Launching Oracle Grid Infrastructure Setup Wizard...
    
    [FATAL] [INS-30024] Installer has detected that the location determined as Oracle Grid Infrastructure home (/u01/app/122/grid), is not a valid Oracle home.
       ACTION: Ensure that either there are no environment variables pointing to this invalid location or register the location as an Oracle home in the central inventory.
    
    Using strace to trace system calls 
    [grid@dsctw21 grid]$ strace -f  gridSetup.sh -silent  -skipPrereqs -responseFile  /home/grid/grid_dsctw2.rsp    oracle.install.asm.SYSASMPassword=sys    oracle.install.asm.monitorPassword=sys 2>llog
    Launching Oracle Grid Infrastructure Setup Wizard...
    
    [FATAL] [INS-30024] Installer has detected that the location determined as Oracle Grid Infrastructure home (/u01/app/122/grid), is not a valid Oracle home.
       ACTION: Ensure that either there are no environment variables pointing to this invalid location or register the location as an Oracle home in the central inventory.
    
    Check the Log File for failed open calls, or for open calls which should fail in a CLEAN Installation ENV 
    [grid@dsctw21 grid]$ grep open llog
    ..
    [pid 11525] open("/etc/oracle/ocr.loc", O_RDONLY) = 93
    [pid 11525] open("/etc/oracle/ocr.loc", O_RDONLY) = 93
    
    --> It seems the installer is testing for the files
     /etc/oracle/ocr.loc
     /etc/oracle/olr.loc 
    to decide whether it is an upgrade or a new installation. 
    
    Fix : Rename ocr.loc and olr.loc 
    [root@dsctw21 ~]# mv /etc/oracle/ocr.loc /etc/oracle/ocr.loc_tbd
    [root@dsctw21 ~]# mv /etc/oracle/olr.loc /etc/oracle/olr.loc_tbd
    
    Now gridSetup.sh should start the installation process

    Restoring the OCR – 12.2

    Backup currently active OCR 
    [root@dsctw21 peer]# ocrconfig -manualbackup
    dsctw21     2017/05/21 09:07:10     +MGMT:/dsctw/OCRBACKUP/backup_20170521_090710.ocr.292.944557631     0     
    [root@dsctw21 peer]#  ocrconfig -showbackup
    PROT-24: Auto backups for the Oracle Cluster Registry are not available
    dsctw21     2017/05/21 09:07:10     +MGMT:/dsctw/OCRBACKUP/backup_20170521_090710.ocr.292.944557631     0   
    
    Locate all OCR backups 
    ASMCMD> find --type OCRBACKUP / *
    +MGMT/dsctw/OCRBACKUP/14348721.293.944515403
    +MGMT/dsctw/OCRBACKUP/backup_20170521_090710.ocr.292.944557631
    ASMCMD> ls -l +MGMT/dsctw/OCRBACKUP/14348721.293.944515403
    Type       Redund  Striped  Time             Sys  Name
    OCRBACKUP  UNPROT  COARSE   MAY 20 21:00:00  Y    14348721.293.944515403
    ASMCMD> ls -l +MGMT/dsctw/OCRBACKUP/backup_20170521_090710.ocr.292.944557631
    Type       Redund  Striped  Time             Sys  Name
    OCRBACKUP  UNPROT  COARSE   MAY 21 09:00:00  Y    backup_20170521_090710.ocr.292.944557631
    
    --> Note the first backup was created by root.sh !
        After a GNS corruption we need to restore to the OCR created by root.sh
    
    List the nodes and cluster resources in your cluster by running the following command on one node:
    [grid@dsctw21 ~]$ olsnodes
    dsctw21
    dsctw22
    
    [grid@dsctw21 ~]$ crs
    *****  Cluster Resources: *****
    Resource NAME               INST   TARGET       STATE        SERVER          STATE_DETAILS
    --------------------------- ----   ------------ ------------ --------------- -----------------------------------------
    ora.LISTENER_SCAN1.lsnr        1   ONLINE       ONLINE       dsctw22         STABLE  
    ora.LISTENER_SCAN2.lsnr        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.LISTENER_SCAN3.lsnr        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.MGMTLSNR                   1   ONLINE       ONLINE       dsctw21         169.254.156.94 192.168.2.151,STABLE
    ora.asm                        1   ONLINE       ONLINE       dsctw21         Started,STABLE  
    ora.asm                        2   ONLINE       ONLINE       dsctw22         Started,STABLE  
    ora.asm                        3   OFFLINE      OFFLINE      -               STABLE  
    ora.cvu                        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.dsctw21.vip                1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.dsctw22.vip                1   ONLINE       ONLINE       dsctw22         STABLE  
    ora.gns                        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.gns.vip                    1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.ioserver                   1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.ioserver                   2   ONLINE       ONLINE       dsctw22         STABLE  
    ora.ioserver                   3   ONLINE       OFFLINE      -               STABLE  
    ora.mgmtdb                     1   ONLINE       ONLINE       dsctw21         Open,STABLE  
    ora.qosmserver                 1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.rhpserver                  1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.scan1.vip                  1   ONLINE       ONLINE       dsctw22         STABLE  
    ora.scan2.vip                  1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.scan3.vip                  1   ONLINE       ONLINE       dsctw21         STABLE  
    
    If OCR is located in an Oracle ASM disk group, then stop the Oracle Clusterware daemon:
    [root@dsctw21 ~]# crsctl stop crs 
    [root@dsctw22 ~]# crsctl stop crs 
      
    Start the Oracle Clusterware stack on one node in exclusive mode by running the following command as root:
    [root@dsctw21 ~]#  crsctl start crs -excl -nocrs
    
    The -nocrs option ensures that the CRSD process and OCR do not start with the rest of the Oracle Clusterware stack.
    
    Ignore any errors that display.
    Check whether CRSD is running by running the following command:
    [root@dsctw21 ~]# crsi
    
    *****  Local Resources: *****
    Resource NAME               INST   TARGET       STATE        SERVER          STATE_DETAILS
    --------------------------- ----   ------------ ------------ --------------- -----------------------------------------
    ora.asm                        1   ONLINE       ONLINE       dsctw21         Started,STABLE  
    ora.cluster_interconnect.haip  1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.crf                        1   OFFLINE      OFFLINE      -               STABLE  
    ora.crsd                       1   OFFLINE      OFFLINE      -               STABLE  
    ora.cssd                       1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.cssdmonitor                1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.ctssd                      1   ONLINE       ONLINE       dsctw21         OBSERVER,STABLE  
    ora.diskmon                    1   OFFLINE      OFFLINE      -               STABLE  
    ora.driver.afd                 1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.drivers.acfs               1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.evmd                       1   ONLINE       INTERMEDIATE dsctw21         STABLE  
    ora.gipcd                      1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.gpnpd                      1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.mdnsd                      1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.storage                    1   OFFLINE      OFFLINE      -               STABLE  
    
    
        If CRSD is running, then stop it by running the following command as root:
        # crsctl stop resource ora.crsd -init
    
       
    Locate all OCR backups 
    ASMCMD> find --type OCRBACKUP / *
    +MGMT/dsctw/OCRBACKUP/14348721.293.944515403
    +MGMT/dsctw/OCRBACKUP/backup_20170521_090710.ocr.292.944557631
    ASMCMD> ls -l +MGMT/dsctw/OCRBACKUP/14348721.293.944515403
    Type       Redund  Striped  Time             Sys  Name
    OCRBACKUP  UNPROT  COARSE   MAY 20 21:00:00  Y    14348721.293.944515403
    ASMCMD> ls -l +MGMT/dsctw/OCRBACKUP/backup_20170521_090710.ocr.292.944557631
    Type       Redund  Striped  Time             Sys  Name
    OCRBACKUP  UNPROT  COARSE   MAY 21 09:00:00  Y    backup_20170521_090710.ocr.292.944557631
    
    Restore OCR with an OCR backup that you can identify in "Listing Backup Files" by running the following command as root:
    [root@dsctw21 ~]# ocrconfig -restore +MGMT/dsctw/OCRBACKUP/14348721.293.944515403
    
        Note:
            If the original OCR location does not exist, then you must create an empty (0 byte) OCR location before 
            you run the ocrconfig -restore command.
            Ensure that the OCR devices that you specify in the OCR configuration exist and that these OCR devices are valid.
            If you configured OCR in an Oracle ASM disk group, then ensure that the Oracle ASM disk group exists and is mounted.
            If the OCR backup file is located in an Oracle ASM disk group, then ensure that the disk group exists and is mounted.
    
    [root@dsctw21 ~]# ocrcheck
    Status of Oracle Cluster Registry is as follows :
         Version                  :          4
         Total space (kbytes)     :     409568
         Used space (kbytes)      :       3992
         Available space (kbytes) :     405576
         ID                       : 2008703361
         Device/File Name         :      +DATA
                                        Device/File integrity check succeeded
                                        Device/File not configured
                                        Device/File not configured
                                        Device/File not configured
                                        Device/File not configured
         Cluster registry integrity check succeeded
         Logical corruption check succeeded
    
    [root@dsctw21 ~]# crsctl stop crs -f
        
        Run the ocrconfig -repair -replace command as root on all the nodes in the cluster where you did not run the 
        ocrconfig -restore command. For example, if you ran the ocrconfig -restore command on node 1 of a four-node 
        cluster, then you must run the ocrconfig -repair -replace command on nodes 2, 3, and 4.
    
    Begin to start Oracle Clusterware by running the following command as root on all of the nodes:
    [root@dsctw21 ~]#  crsctl start crs
    [root@dsctw22 ~]#  crsctl start crs
    
    Verify OCR integrity of all of the cluster nodes that are configured as part of your cluster by running the following CVU command:
    [grid@dsctw21 ~]$  cluvfy comp ocr -n all -verbose
    Verifying OCR Integrity ...PASSED
    Verification of OCR integrity was successful. 
    CVU operation performed:      OCR integrity
    Date:                         May 21, 2017 8:13:54 PM
    CVU home:                     /u01/app/122/grid/
    User:                         grid
    
    Verify cluster resources 
    [root@dsctw22 ~]#  crs
    *****  Cluster Resources: *****
    Resource NAME               INST   TARGET       STATE        SERVER          STATE_DETAILS
    --------------------------- ----   ------------ ------------ --------------- -----------------------------------------
    ora.LISTENER_SCAN1.lsnr        1   ONLINE       ONLINE       dsctw22         STABLE  
    ora.LISTENER_SCAN2.lsnr        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.LISTENER_SCAN3.lsnr        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.MGMTLSNR                   1   ONLINE       ONLINE       dsctw21         169.254.156.94 192.168.2.151,STABLE
    ora.asm                        1   ONLINE       ONLINE       dsctw21         Started,STABLE  
    ora.asm                        2   ONLINE       ONLINE       dsctw22         Started,STABLE  
    ora.asm                        3   OFFLINE      OFFLINE      -               STABLE  
    ora.cvu                        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.dsctw21.vip                1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.dsctw22.vip                1   ONLINE       ONLINE       dsctw22         STABLE  
    ora.gns                        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.gns.vip                    1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.ioserver                   1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.ioserver                   2   ONLINE       ONLINE       dsctw22         STABLE  
    ora.ioserver                   3   ONLINE       OFFLINE      -               STABLE  
    ora.mgmtdb                     1   ONLINE       ONLINE       dsctw21         Open,STABLE  
    ora.qosmserver                 1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.rhpserver                  1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.scan1.vip                  1   ONLINE       ONLINE       dsctw22         STABLE  
    ora.scan2.vip                  1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.scan3.vip                  1   ONLINE       ONLINE       dsctw21         STABLE

    A deeper dive into JPA, 2-Phase-Commit [ 2PC ] and RAC

    Overview JPA and 2-Phase-Commit

    Mike Keith, Architect at Oracle and author of

    Pro JPA 2: Mastering the Java Persistence API (Second Edition)

    summarizes the usage of JPA in a distributed environment as follows:

    • A JPA application will get the 2PC benefits the same as any other application
    • The persistence unit data source is using JTA and is configured to use an XA data source
    • The XA resources and transaction manager 2PC interactions happen on their own without the JPA EMF knowing or having to be involved.
    • If a 2PC XA tx fails then an exception will be thrown just the same as if the tx was optimized to not have 2PC.

    This was enough motivation for me, working on Oracle RAC and JDBC projects, to have a closer look at JPA and 2PC.

    Versions used  / Configuration File persistence.xml

    Wildfly:  8.2
    Hibernate Version: 4.3.7.Final
    --> Collecting Data for RAC database1
        Driver Name             : Oracle JDBC driver
        Driver Version          : 12.1.0.2.0
        Database Product Version: Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
        DB Name:  BANKA
        1. Instance Name: bankA_2 - Host: hract21.example.com - Pooled XA Connections: 61
    
    --> Collecting Data for RAC database2
        Driver Name             : Oracle JDBC driver
        Driver Version          : 12.1.0.2.0
        Database Product Version: Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
        DB Name:  BANKB
        1. Instance Name: bankb_3 - Host: hract21.example.com - Pooled XA Connections: 62
    
    persistence.xml
    
    <?xml version="1.0"?>
    <persistence xmlns="http://java.sun.com/xml/ns/persistence"
                 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                 xsi:schemaLocation="http://java.sun.com/xml/ns/persistence http://java.sun.com/xml/ns/persistence/persistence_2_0.xsd"
                 version="2.0">
    
        <persistence-unit name="RacBankAHibPU" transaction-type="JTA">
            <provider>org.hibernate.ejb.HibernatePersistence</provider>
            <jta-data-source>java:/jboss/datasources/xa_rac12g_banka</jta-data-source>
            <class>com.hhu.wfjpa2pc.Accounts</class>
            <properties>
                <property name="hibernate.transaction.jta.platform"
                     value="org.hibernate.service.jta.platform.internal.JBossAppServerJtaPlatform" />
                <property name="hibernate.show_sql" value="true" />
                <property name="hibernate.dialect" value="org.hibernate.dialect.Oracle10gDialect"/>
            </properties>
        </persistence-unit>
        <persistence-unit name="RacBankBHibPU" transaction-type="JTA">
            <provider>org.hibernate.ejb.HibernatePersistence</provider>
            <jta-data-source>java:/jboss/datasources/xa_rac12g_bankb</jta-data-source>
            <class>com.hhu.wfjpa2pc.Accounts</class>
            <properties>
                <property name="hibernate.transaction.jta.platform"
                     value="org.hibernate.service.jta.platform.internal.JBossAppServerJtaPlatform" />
                <property name="hibernate.show_sql" value="true" />
                <property name="hibernate.dialect" value="org.hibernate.dialect.Oracle10gDialect"/>
            </properties>
        </persistence-unit>
    </persistence>
    
    

    Running a successful 2PC operation with JPA

    Call Flow 
    
    - Get EntityManager for RAC Database1 [ em1=getEntityManager1(); ]
    - Get EntityManager for RAC Database2 [ em2=getEntityManager2(); ]
    - Start a User transaction              [ ut.begin(); ]
    - Join transaction from EntityManager 1  [ em1.joinTransaction(); ]
    - Join transaction from EntityManager 2  [ em2.joinTransaction(); ]
    - Change Balance on both databases
    bankA_acct.setBalance( bankA_acct.getBalance().add(b) );
    em1.merge(bankA_acct);
    if (isEnableFlush() )
    em1.flush();
    
    bankB_acct.setBalance( bankB_acct.getBalance().subtract(b) );
    em2.merge(bankB_acct);
    if (isEnableFlush() )
    em2.flush();
    - Finally commit the Transaction [ ut.commit(); ]
    
    Application log :
    14:51:58.071 transferMoneyImpl():: Found both Entity Managers for PUs : RacBankAHibPU and RacBankBHibPU
    14:51:58.074 transferMoneyImpl():: Account at bank A: User99_at_BANKA - Balance: 10000
    14:51:58.075 transferMoneyImpl():: Account at bank B: User98_at_BANKB - Balance: 10000
    14:51:58.076 transferMoneyImpl():: Both EMs joined our XA Transaction...
    14:51:58.092 transferMoneyImpl():: Before Commit ...
    14:51:58.160 transferMoneyImpl():: Tx Commit worked !
    14:51:58.165 Database Name:BANKA -- Account: User99_at_BANKA -- Balance: 11000.0
    14:51:58.168 Database Name:BANKB -- Account: User98_at_BANKB -- Balance: 9000.0
    14:51:58.169 transferMoneyImpl():: Leaving with TX Status:: [UT status:  6 - STATUS_NO_TRANSACTION]
    
    -> We successfully managed to transfer some money from bankA to bankB !
    

    Testing Rollback operation with EM flush enabled [ transaction status : STATUS_MARKED_ROLLBACK ]

    Account Balance
     transferMoneyImpl():: Account at bank A: User99_at_BANKA - Balance: 20000
     transferMoneyImpl():: Account at bank B: User98_at_BANKB - Balance: 0
    Note the next money transfer/transaction should trigger a constraint violation ! 
    
    Call Flow
    - Get EntityManager for RAC Database1 [ em1=getEntityManager1(); ]
    - Get EntityManager for RAC Database2 [ em2=getEntityManager2(); ]
    - Start a User transaction             [ ut.begin(); ] 
    - Join transaction from EntityManager 1  [ em1.joinTransaction(); ]
    - Join transaction from EntityManager 2  [ em2.joinTransaction(); ]
    - Change Balance on both databases
         bankA_acct.setBalance( bankA_acct.getBalance().add(b) );
            em1.merge(bankA_acct);
            if (isEnableFlush() )
              em1.flush();
                    
            bankB_acct.setBalance( bankB_acct.getBalance().subtract(b) );
            em2.merge(bankB_acct);           
            if (isEnableFlush() )
              em2.flush();              
    - em2.flush fails due to a constraint violation and sets the TX status to : STATUS_MARKED_ROLLBACK 
       Error : org.hibernate.exception.ConstraintViolationException: could not execute statement
    - Exception handler checks transaction status : STATUS_MARKED_ROLLBACK and is rolling back the TX
           if ( status != javax.transaction.Status.STATUS_NO_TRANSACTION   ) 
             {
             ut.rollback();
             ...
    - After rollback() transaction status changed to   STATUS_NO_TRANSACTION                      
      
    Application log :
    15:11:03.920 transferMoneyImpl():: Found both Entity Managers for PUs : RacBankAHibPU and RacBankBHibPU
    15:11:03.929 transferMoneyImpl():: Account at bank A: User99_at_BANKA - Balance: 20000
    15:11:03.931 transferMoneyImpl():: Account at bank B: User98_at_BANKB - Balance: 0
    15:11:03.931 transferMoneyImpl():: Both EMs joined our XA Transaction... 
    15:11:03.960 transferMoneyImpl():: FATAL ERROR - Tx Status : [UT status:  1 - STATUS_MARKED_ROLLBACK]
    15:11:03.962 transferMoneyImpl():: Before TX rollback ... 
    15:11:03.974 transferMoneyImpl():: TX rollback worked !
    15:11:03.974 transferMoneyImpl():: Leaving with TX Status:: [UT status:  6 - STATUS_NO_TRANSACTION]
    
    
    Exception stack :
    15:11:03.960 Error in top level function: transferMoneyImpl():: 
    15:11:03.960 org.hibernate.exception.ConstraintViolationException: could not execute statement
    15:11:03.961 javax.persistence.PersistenceException: org.hibernate.exception.ConstraintViolationException: could not execute statement
        at org.hibernate.jpa.spi.AbstractEntityManagerImpl.convert(AbstractEntityManagerImpl.java:1763)
        at org.hibernate.jpa.spi.AbstractEntityManagerImpl.convert(AbstractEntityManagerImpl.java:1677)
        at org.hibernate.jpa.spi.AbstractEntityManagerImpl.convert(AbstractEntityManagerImpl.java:1683)
        at org.hibernate.jpa.spi.AbstractEntityManagerImpl.flush(AbstractEntityManagerImpl.java:1338)
        at com.hhu.wfjpa2pc.Jpa2pcTest.transferMoneyImpl(Jpa2pcTest.java:235)
        at com.hhu.wfjpa2pc.Jpa2pcTest.transferMoney(Jpa2pcTest.java:166)
            ..
    Caused by: org.hibernate.exception.ConstraintViolationException: could not execute statement
        at org.hibernate.exception.internal.SQLExceptionTypeDelegate.convert(SQLExceptionTypeDelegate.java:72)
        at org.hibernate.exception.internal.StandardSQLExceptionConverter.convert(StandardSQLExceptionConverter.java
        ... 
    Caused by: java.sql.SQLIntegrityConstraintViolationException: ORA-02290: check constraint (SCOTT.S_LOWER_CHK) violated
    

    Testing Rollback operation without EM flush enabled [ transaction status : STATUS_NO_TRANSACTION  ]

    Account Balance
     transferMoneyImpl():: Account at bank A: User99_at_BANKA - Balance: 20000
     transferMoneyImpl():: Account at bank B: User98_at_BANKB - Balance: 0
    Note the next money transfer/transaction should trigger a constraint violation ! 
    
    Call Flow
    - Get EntityManager for RAC Database1 [ em1=getEntityManager1(); ]
    - Get EntityManager for RAC Database2 [ em2=getEntityManager2(); ]
    - Start a User transaction            [ ut.begin(); ] 
    - Join transaction from EntityManager 1  [ em1.joinTransaction(); ]
    - Join transaction from EntityManager 2  [ em2.joinTransaction(); ]
    - Change Balance on both databases
         bankA_acct.setBalance( bankA_acct.getBalance().add(b) );
            em1.merge(bankA_acct);
            if (isEnableFlush() )
              em1.flush();
                    
            bankB_acct.setBalance( bankB_acct.getBalance().subtract(b) );
            em2.merge(bankB_acct);           
            if (isEnableFlush() )
              em2.flush();        
    - Commit the Transaction [ ut.commit(); ] fails with :  ARJUNA016053: Could not commit transaction.
     - As the commit itself fails, Wildfly rolls back the transaction 
     - Tx Status after COMMIT error :  STATUS_NO_TRANSACTION 
     - The exception handler checks the transaction status: as it is STATUS_NO_TRANSACTION, it does not roll back the TX
           if ( status != javax.transaction.Status.STATUS_NO_TRANSACTION   ) 
             {
             ut.rollback();
             ...
    - Here we don't run any rollback operation -> the TX status remains at   STATUS_NO_TRANSACTION                      
      
    Application log :
      15:27:53.818 transferMoneyImpl():: Found both Entity Managers for PUs : RacBankAHibPU and RacBankBHibPU
      15:27:53.827 transferMoneyImpl():: Account at bank A: User99_at_BANKA - Balance: 20000
      15:27:53.829 transferMoneyImpl():: Account at bank B: User98_at_BANKB - Balance: 0
      15:27:53.829 transferMoneyImpl():: Both EMs joined our XA Transaction... 
      15:27:53.829 transferMoneyImpl():: Before Commit ... 
      15:27:53.857 transferMoneyImpl():: FATAL ERROR - Tx Status : [UT status:  6 - STATUS_NO_TRANSACTION]
      15:27:53.859 transferMoneyImpl():: TX not active / TX already rolled back
      15:27:53.859 transferMoneyImpl():: Leaving with TX Status:: [UT status:  6 - STATUS_NO_TRANSACTION]
    

    Testing transaction Recovery with JPA

    What we are expecting  and what we are testing
      - Transaction Timeout is set to 600 seconds
      - We set a breakpoint at   OracleXAResource.commit
        ==> This means Wildfly has written a COMMIT record to the Wildfly LOG-STORE
      - After stopping at the first OracleXAResource.commit breakpoint we kill the Wildfly server 
      - Both RMs [ Oracle RAC databases ] are now counting down the transaction timeout 
      - If the timeout is reached the failed transaction becomes visible in the dba_2pc_pending table
      - Trying to get a lock on these records should lead to an ORA-1591 error 
      - After the Wildfly restart the Periodic Recovery should run OracleXAResource.commit and release all locks
    
    Preparing and running the test scenario
    
    Start Wildfly in Debug Mode :
    Set breakpoint on OracleXAResource.commit and run the application
    
    Stack Trace 
    "default task-3"
    oracle.jdbc.xa.client.OracleXAResource.commit(OracleXAResource.java:553)
    org.jboss.jca.adapters.jdbc.xa.XAManagedConnection.commit(XAManagedConnection.java:338)
    org.jboss.jca.core.tx.jbossts.XAResourceWrapperImpl.commit(XAResourceWrapperImpl.java:107)
    com.arjuna.ats.internal.jta.resources.arjunacore.XAResourceRecord.topLevelCommit(XAResourceRecord.java:461)
    com.arjuna.ats.arjuna.coordinator.BasicAction.doCommit(BasicAction.java:2810)
    com.arjuna.ats.arjuna.coordinator.BasicAction.doCommit(BasicAction.java:2726)
    com.arjuna.ats.arjuna.coordinator.BasicAction.phase2Commit(BasicAction.java:1820)
    com.arjuna.ats.arjuna.coordinator.BasicAction.End(BasicAction.java:1504)
    com.arjuna.ats.arjuna.coordinator.TwoPhaseCoordinator.end(TwoPhaseCoordinator.java:96)
    com.arjuna.ats.arjuna.AtomicAction.commit(AtomicAction.java:162)
    com.arjuna.ats.internal.jta.transaction.arjunacore.TransactionImple.commitAndDisassociate(TransactionImple.java:1166)
    com.arjuna.ats.internal.jta.transaction.arjunacore.BaseTransaction.commit(BaseTransaction.java:126)
    com.arjuna.ats.jbossatx.BaseTransactionManagerDelegate.commit(BaseTransactionManagerDelegate.java:75)
    org.jboss.tm.usertx.client.ServerVMClientUserTransaction.commit(ServerVMClientUserTransaction.java:173)
    com.hhu.wfjpa2pc.Jpa2pcTest.transferMoneyImpl(Jpa2pcTest.java:242)
    com.hhu.wfjpa2pc.Jpa2pcTest.transferMoney(Jpa2pcTest.java:166)
    
    Wildfly Check for prepared transaction 
    $ $WILDFLY_HOME/bin/jboss-cli.sh --connect --file=list_prepared_xa_tx.cli
    {"outcome" => "success"}
    0:ffffc0a805c9:f5a10ef:56039e68:d
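     The list_prepared_xa_tx.cli script itself is not shown in this article. As a rough, hypothetical
     equivalent (an assumption about its contents, not the actual script) you could probe the transactions
     log-store and list its entries directly from the CLI:

     $ $WILDFLY_HOME/bin/jboss-cli.sh --connect \
         --commands="/subsystem=transactions/log-store=log-store:probe(),ls /subsystem=transactions/log-store=log-store/transactions"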
    
    Locate and kill JBOSS server process 
    0 S oracle    5875  5821  7  80   0 - 413473 futex_ 08:55 ?       00:00:30 
         /usr/java/latest/bin/java .... -Djboss.server.base.dir=/usr/local/wildfly-8.2.0.Final/standalone -c standalone.xml
    0 S oracle    6174  5680  0  80   0 - 25827 pipe_w 09:02 pts/1    00:00:00 grep java
    [oracle@wls1 WILDFLY]$ kill -9 5875
    
     Now wait [ at least 600 seconds ] until the transaction becomes visible in dba_2pc_pending
    
    SQL> SELECT * FROM GLOBAL_NAME;
    GLOBAL_NAME
    ----------------
    BANKA
    
    SQL> select * from dba_2pc_pending;
    LOCAL_TRAN_ID           GLOBAL_TRAN_ID                            STATE         MIX A TRAN_COMMENT
    ---------------------- ---------------------------------------------------------------- ---------------- --- - ----------------
    FAIL_TIM FORCE_TI RETRY_TI OS_USER    OS_TERMINAL  HOST          DB_USER       COMMIT#
    -------- -------- -------- ------------ ------------ ---------------- ------------ ----------------
    9.21.7139           131077.00000000000000000000FFFFC0A805C90F5A10EF56039E680000000D3 prepared     no
                   1
    09:07:22      09:15:34 oracle    unknown      wls1.example.com           43619336
    
    
    
    SQL> SELECT * FROM GLOBAL_NAME;
    GLOBAL_NAME
    ----------------
    BANKB
    
    SQL> select * from dba_2pc_pending;
    LOCAL_TRAN_ID           GLOBAL_TRAN_ID                            STATE         MIX A TRAN_COMMENT
    ---------------------- ---------------------------------------------------------------- ---------------- --- - ----------------
    FAIL_TIM FORCE_TI RETRY_TI OS_USER    OS_TERMINAL  HOST          DB_USER       COMMIT#
    -------- -------- -------- ------------ ------------ ---------------- ------------ ----------------
    4.15.3293           131077.00000000000000000000FFFFC0A805C90F5A10EF56039E680000000D3 prepared     no
                   1
    09:07:22      09:15:34 oracle    unknown      wls1.example.com           20931538
    
    Check for locks 
    -> Connected to  scott/tiger@ract2-scan.grid12c.example.com:1521/banka
    select * from accounts for update
    *
    ERROR at line 1:
    ORA-01591: lock held by in-doubt distributed transaction 9.21.7139
    
    
    -> Connected to  scott/tiger@ract2-scan.grid12c.example.com:1521/bankb
    select * from accounts for update
    *
    ERROR at line 1:
    ORA-01591: lock held by in-doubt distributed transaction 4.15.3293
    
    
    Restart Wildfly in Debug Mode and let the Periodic Recovery Thread commit the transaction 
    
    "Periodic Recovery"
    oracle.jdbc.xa.client.OracleXAResource.commit(OracleXAResource.java:553)
    org.jboss.jca.adapters.jdbc.xa.XAManagedConnection.commit(XAManagedConnection.java:338)
    org.jboss.jca.core.tx.jbossts.XAResourceWrapperImpl.commit(XAResourceWrapperImpl.java:107)
    com.arjuna.ats.internal.jta.resources.arjunacore.XAResourceRecord.topLevelCommit(XAResourceRecord.java:461)
    com.arjuna.ats.arjuna.coordinator.BasicAction.doCommit(BasicAction.java:2810)
    com.arjuna.ats.arjuna.coordinator.BasicAction.doCommit(BasicAction.java:2726)
    com.arjuna.ats.arjuna.coordinator.BasicAction.phase2Commit(BasicAction.java:1820)
    com.arjuna.ats.arjuna.recovery.RecoverAtomicAction.replayPhase2(RecoverAtomicAction.java:71)
    com.arjuna.ats.internal.arjuna.recovery.AtomicActionRecoveryModule.doRecoverTransaction(AtomicActionRecoveryModule.java:152)
    com.arjuna.ats.internal.arjuna.recovery.AtomicActionRecoveryModule.processTransactionsStatus(AtomicActionRecoveryModule.java:253)
    com.arjuna.ats.internal.arjuna.recovery.AtomicActionRecoveryModule.periodicWorkSecondPass(AtomicActionRecoveryModule.java:109)
    com.arjuna.ats.internal.arjuna.recovery.PeriodicRecovery.doWorkInternal(PeriodicRecovery.java:789)
    com.arjuna.ats.internal.arjuna.recovery.PeriodicRecovery.run(PeriodicRecovery.java:371)
    
    -> WildFly Thread Periodic Recovery stops at OracleXAResource.commit
    -> Press Debugger Command : Continue 
    -> WildFly Thread Periodic Recovery has committed Transaction Branch 1
    -> WildFly Thread Periodic Recovery stops again  at .OracleXAResource.commit
    -> Press Debugger Command : Continue 
    -> WildFly Thread Periodic Recovery has committed Transaction Branch 2
    -> Complete Transaction is now committed 
    
    Verify Access to the Database records and Wildfly Prepared Transaction Cleanup 
    -> Connected to  scott/tiger@ract2-scan.grid12c.example.com:1521/banka
    ACCOUNT                 BALANCE
    -------------------------------- ----------
    User99_at_BANKA               14000
    
    
    -> Connected to  scott/tiger@ract2-scan.grid12c.example.com:1521/bankb
    ACCOUNT                 BALANCE
    -------------------------------- ----------
    User98_at_BANKB                6000
    
    
    List prepared Transaction
    $  $WILDFLY_HOME/bin/jboss-cli.sh --connect --file=list_prepared_xa_tx.cli
    {"outcome" => "success"}
    
    -> After a successful transaction recovery the locks are gone 
    

     

    Java Code

    public void transferMoneyImpl()
          {
            String methodName = "transferMoneyImpl():: ";
            EntityManager em1;
            EntityManager em2;
            UserTransaction ut =null;
          try
            {
            setRunTimeInfo(methodName  + "Entering ... ");
                
            HttpSession session = (HttpSession) FacesContext.getCurrentInstance().getExternalContext().getSession(true);
            if ( session == null)
                  {
                    throw new IllegalArgumentException(methodName+ ": Could not get HTTP session : ");    
                  }                        
            final Object lock = session.getId().intern();       
            synchronized(lock) 
                  {
                    em1=getEntityManager1();
                    em2=getEntityManager2();
                        //
                        // Note: even if we get an EntityManager object we still can't be sure that the 
                        // EntityManager could open the underlying JDBC connection !
                        //
                    if ( em1 == null )
                        setRunTimeInfo(methodName  + "Failed to get EM for PU: " + EMF.getPU1() );
                    else if ( em2 == null )
                        setRunTimeInfo(methodName  + "Failed to get EM for PU: " + EMF.getPU2() );
                    else
                        setRunTimeInfo(methodName  + "Found both Entity Managers for PUs : " + 
                           EMF.getPU1()  + " and " +  EMF.getPU2()  ); 
                       
                     
                    String bankA_acct_name = "User99_at_BANKA";
                    Accounts bankA_acct = em1.find(Accounts.class, bankA_acct_name);
                    if ( bankA_acct == null)
                        { 
                        setRunTimeInfo(methodName + "Could not locate Account at bankA : " + bankA_acct_name );
                        return;
                        }
                    setRunTimeInfo(methodName  +"Account at bank A: " + bankA_acct.getAccount()  + " - Balance: " +  bankA_acct.getBalance() );
                    
                    String bankB_acct_name = "User98_at_BANKB";
                    Accounts bankB_acct = em2.find(Accounts.class, bankB_acct_name);
                    if ( bankB_acct == null)
                        { 
                        setRunTimeInfo(methodName + "Could not locate Account at bankB : " + bankB_acct_name );
                        return;
                        }
                    setRunTimeInfo(methodName  +"Account at bank B: " + bankB_acct.getAccount()  + " - Balance: " +  bankB_acct.getBalance() );
                  
                    ut  = (javax.transaction.UserTransaction)new InitialContext().lookup("java:comp/UserTransaction"); 
                        // Set transaction timeout to 120 seconds to avoid any timeouts during testing -
                        // especially when testing transaction recovery by restarting Wildfly server 
                        // Note as we kill the JAVA process both RMs will wait 120 s before Tx becomes visible in dba_2pc_pending 
                    int tx_timeout = 120;
                    ut.setTransactionTimeout(tx_timeout);
                    ut.begin();
                    em1.joinTransaction();
                    em2.joinTransaction();
                    setRunTimeInfo(methodName  + "Both EMs joined our XA Transaction... - TX Timeout: " + tx_timeout );
                    BigDecimal b = new BigDecimal(1000);
                    bankA_acct.setBalance( bankA_acct.getBalance().add(b) );
                    em1.merge(bankA_acct);
                    if (isEnableFlush() )
                        em1.flush();
                    
                    bankB_acct.setBalance( bankB_acct.getBalance().subtract(b) );
                    em2.merge(bankB_acct);           
                    if (isEnableFlush() )
                        em2.flush();
                    
                    setRunTimeInfo(methodName  + "Before Commit ... ");                
                    ut.commit();
                    setRunTimeInfo(methodName  + "Tx Commit worked !");
                    checkBalanceImpl();
                  }
            } catch ( Throwable t1)
              { 
                try
                  {    
                  String tx_status = returnTXStatus(ut);
                  setRunTimeInfo( methodName  + "FATAL ERROR - Tx Status : " + tx_status  );
                    // Use Throwable as we don't want to lose any important information
                    // Note: Throwable is the super class of the Exception class          
                   genericException("Error in top level function: " + methodName , (Exception)t1);                          
                   if ( ut != null )
                      {
                        int status = ut.getStatus();    
                            // rollback transaction if still active - if not do nothing 
                        if ( status != javax.transaction.Status.STATUS_NO_TRANSACTION   ) {
                            setRunTimeInfo(methodName  + "Before TX rollback ... ");
                            ut.rollback();
                            setRunTimeInfo(methodName  + "TX rollback worked !");
                        } else
                            setRunTimeInfo(methodName  + "TX not active / TX already rolled back");
                      }
                  }  catch ( Throwable t2)
                     { 
                       genericException(methodName + "FATAL ERROR during ut.rollback() ", (Exception)t2); 
                     } 
              }
            closeEntityManagers();       
            String tx_status_exit = "";
            try
              {    
                tx_status_exit = returnTXStatus(ut);
              }   catch ( Throwable t3)
                { 
                  genericException(methodName + " Error during returning TX status ", (Exception)t3); 
                }    
            setRunTimeInfo(methodName  + "Leaving with TX Status:: " + tx_status_exit );
          }
    

    Reference

    Install 12.2 Oracle Domain Service Cluster with Virtualbox env

    Overview Domain Service Cluster

    -> From Cluster Domains ORACLE WHITE PAPER

    Domain Services Cluster Key Facts
    DSC: 
    The Domain Services Cluster is the heart of the Cluster Domain, as it is configured to provide the services that will be utilized by the various Member Clusters within the Cluster Domain. As per the name, it is a cluster itself, thus providing the required high availability and scalability for the provisioned services.
    
    GIMR :
    The centralized GIMR is host to cluster health and diagnostic information for all the clusters in the Cluster Domain.  As such, it is accessed by the client applications of the Autonomous Health Framework (AHF), the Trace File Analyzer (TFA) facility and Rapid Home Provisioning (RHP) Server across the Cluster Domain.  
    Thus, it acts in support of the DSC’s role as the management hub.
    
    IOServer [ promised with 12.1 - finally implemented with 12.2 ]
    Configuring the Database Member Cluster to use an indirect I/O path to storage is simpler still, requiring no locally configured shared storage, thus dramatically improving the ease of deploying new clusters, and changing the shared storage for those clusters (adding disks to the storage is done at the DSC - an invisible operation to the Database Member Cluster).
    Instead, all database I/O operations are channeled through the IOServer processes on the DSC.  From the database instances on the Member Cluster, the database’s data files are fully accessible and seen as individual files, exactly as they would be with locally attached shared storage.  
    The real difference is that the actual I/O operation is handed off to the IOServers on the DSC instead of being processed locally on the nodes of
    the Member Cluster.  The major benefit of this approach is that new Database Member Clusters don’t need to be configured with locally  attached shared storage, making deployment simpler and easier
    
    Rapid Home Provisioning Server
    The Domain Services Cluster may also be configured to host a Rapid Home Provisioning (RHP) server.  RHP is used to manage the provisioning, patching and upgrading of the Oracle Database and GI software stacks
    and any other critical software across the Member Clusters in the Cluster Domain.  Through this service, the RHP server would be used to maintain the currency of the installations on the Member Clusters as RHP clients, thus simplifying and standardizing the deployments across the Cluster Domain.
    
    
    
    The services available consist of
    • Grid Infrastructure Management Repository ( GIMR)
    • ASM Storage service
    • IOServer service
    • Rapid Home Provisioning Server

     Domain Service Cluster Resources

     • If you think that a 12.1.0.2 RAC installation is a resource monster, then you are completely wrong
     • A 12.2 Domain Service Cluster installation will eat up even more resources
     
     Memory Resource Calculation when trying to set up a Domain Service Cluster with 16 GByte memory 
    VM DSC System1 [     running GIMR Database ] : 7 GByte
    VM DSC System2 [ NOT running GIMR Database ] : 6 GByte
    VM NameServer                                : 1 GByte
    Window 7 Host                                : 2 GByte 
    
     I really think we need 32 GByte of memory for running a Domain Service Cluster ...
     But as I'm waiting on a 16 GByte memory upgrade I will try to run the setup with 16 GByte of memory.
    
     The major problem is the GIMR database memory requirements [ see DomainServicesCluster_GIMR.dbc ]
     - sga_target           : 4 GByte 
     - pga_aggregate_target : 2 GByte 
    
     This will kill my above 16 GByte setup so I need to change DomainServicesCluster_GIMR.dbc. 
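     Before editing the template it helps to check which memory settings it actually requests. A minimal
     sketch, assuming the DBCA templates live under the unzipped grid home at
     $GRID_HOME/assistants/dbca/templates (path is an assumption; adjust it to your environment):

     [grid@dsctw21 grid]$ grep -iE 'sga_target|pga_aggregate_target' \
           /u01/app/122/grid/assistants/dbca/templates/DomainServicesCluster_GIMR.dbc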
    

     

    Disk Requirements 
    Shared Disks 
    03.05.2017  19:09    21.476.933.632 asm1_dsc_20G.vdi
    03.05.2017  19:09    21.476.933.632 asm2_dsc_20G.vdi
    03.05.2017  19:09    21.476.933.632 asm3_dsc_20G.vdi
    03.05.2017  19:09    21.476.933.632 asm4_dsc_20G.vdi
    03.05.2017  19:09   107.376.279.552 asm5_GIMR_100G.vdi
    03.05.2017  19:09   107.376.279.552 asm6_GIMR_100G.vdi
    03.05.2017  19:09   107.376.279.552 asm7_GIMR_100G.vdi
    
     Disk Group +DATA : 4 x 20 GByte : Mode : Normal
    Disk Group +GIMR : 3 x 100 GByte 
                     : Mode : External
                     : Space Required during Installation : 289 GByte  
                     : Space provided: 300 GByte
    
    04.05.2017  08:48    22.338.863.104 dsctw21.vdi
    03.05.2017  21:44                 0 dsctw21_OBASE_120G.vdi
    03.05.2017  18:03    <DIR>          dsctw22
    04.05.2017  08:48    15.861.809.152 dsctw22.vdi
    03.05.2017  21:43                 0 dsctw22_OBASE_120G.vdi
    per RAC VM :  50 GByte for OS, Swap, GRID Software installation
               : 120 GByte for ORACLE_BASE 
                   : Space Required for ORACLE_BASE during Installation : 102 GByte  
                   : Space provided: 120 GByte  
    
     This translates to about 450 GByte of disk space for installing a Domain Service Cluster
    
     Note:  -> Disk space requirements are quite huge for this type of installation
            -> For the GIMR disk group we need 300 GByte of space with EXTERNAL redundancy
     
    
    Network Requirements  
    GNS Entry
    
    Name Server Entry for GNS
    $ORIGIN ggrid.example.com.
    @       IN          NS        ggns.ggrid.example.com. ; NS  grid.example.com
            IN          NS        ns1.example.com.      ; NS example.com
    ggns    IN          A         192.168.5.60 ; glue record
    

    Cluvfy commands to verify our RAC VMs

    [grid@dsctw21 linuxx64_12201_grid_home]$ cd  /media/sf_kits/Oracle/122/linuxx64_12201_grid_home 
    
     [grid@dsctw21 linuxx64_12201_grid_home]$ runcluvfy.sh comp admprv -n "ractw21,ractw22" -o user_equiv -verbose -fixup
     
    [grid@dsctw21 linuxx64_12201_grid_home]$ ./runcluvfy.sh stage -pre crsinst -fixup -n  dsctw21 
    
    [grid@dsctw21 linuxx64_12201_grid_home]$  ./runcluvfy.sh  comp gns -precrsinst -domain  dsctw2.example.com  -vip 192.168.5.60 -verbose 
    
         
     [grid@dsctw21 linuxx64_12201_grid_home]$  runcluvfy.sh comp dns -server -domain ggrid.example.com -vipaddress 192.168.5.60/255.255.255.0/enp0s8 -verbose -method root 
          -> The server command should block here
    [grid@dsctw21 linuxx64_12201_grid_home]$ ./runcluvfy.sh comp  dns -client -domain   dsctw2.example.com -vip  192.168.5.60  -method root -verbose -last  
        -> The client command with -last should terminate the server too 
    
     Only memory-related errors like PRVF-7530 and DNS configuration check errors should be ignored, and only if you run your VMs with less than 8 GByte of memory 
    
    Verifying Physical Memory ...FAILED dsctw21: PRVF-7530 : Sufficient physical memory is not available on node          "dsctw21" [Required physical memory = 8GB (8388608.0KB)] 
    
     Task DNS configuration check - This task verifies whether GNS subdomain delegation has been implemented in the DNS. This warning can be ignored too, as GNS is not running yet
    
    

    Create ASM Disks

    Create the ASM Disks for +DATA Disk Group holding OCR, Voting Disks 
    
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\RACTW2\asm1_dsc_20G.vdi --size 20480 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 8c914ad2-30c0-4c4d-88e0-ff94aef761c8
    
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\RACTW2\asm2_dsc_20G.vdi --size 20480 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 72791d07-9b21-41dd-8630-483902343e22
    
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\RACTW2\asm3_dsc_20G.vdi --size 20480 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 7f5684e6-e4d2-47ab-8166-b259e3e626e5
    
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\RACTW2\asm4_dsc_20G.vdi --size 20480 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 2c564704-46ad-4f37-921b-e56f0812c0bf
    
    M:\VM\DSCRACTW2>VBoxManage modifyhd  asm1_dsc_20G.vdi  --type shareable
    M:\VM\DSCRACTW2>VBoxManage modifyhd  asm2_dsc_20G.vdi  --type shareable
    M:\VM\DSCRACTW2>VBoxManage modifyhd  asm3_dsc_20G.vdi  --type shareable
    M:\VM\DSCRACTW2>VBoxManage modifyhd  asm4_dsc_20G.vdi  --type shareable
    
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw21 --storagectl "SATA" --port 1 --device 0 --type hdd --medium asm1_dsc_20G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw21 --storagectl "SATA" --port 2 --device 0 --type hdd --medium asm2_dsc_20G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw21 --storagectl "SATA" --port 3 --device 0 --type hdd --medium asm3_dsc_20G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw21 --storagectl "SATA" --port 4 --device 0 --type hdd --medium asm4_dsc_20G.vdi  --mtype shareable
    
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw22 --storagectl "SATA" --port 1 --device 0 --type hdd --medium asm1_dsc_20G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw22 --storagectl "SATA" --port 2 --device 0 --type hdd --medium asm2_dsc_20G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw22 --storagectl "SATA" --port 3 --device 0 --type hdd --medium asm3_dsc_20G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw22 --storagectl "SATA" --port 4 --device 0 --type hdd --medium asm4_dsc_20G.vdi  --mtype shareable
    
    Create and attach the GIMR Disk Group
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\DSCRACTW2\asm5_GIMR_100G.vdi --size 102400 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 8604878c-8c73-421a-b758-4ef5bf0a3d61
    M:\VM\DSCRACTW2>VBoxManage modifyhd  asm5_GIMR_100G.vdi  --type shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw21 --storagectl "SATA" --port 5 --device 0 --type hdd --medium asm5_GIMR_100G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw22 --storagectl "SATA" --port 5 --device 0 --type hdd --medium asm5_GIMR_100G.vdi  --mtype shareable
    
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\DSCRACTW2\asm6_GIMR_100G.vdi --size 102400 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 8604878c-8c73-421a-b758-4ef5bf0a3d61
    M:\VM\DSCRACTW2>VBoxManage modifyhd  asm6_GIMR_100G.vdi  --type shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw21 --storagectl "SATA" --port 6 --device 0 --type hdd --medium asm6_GIMR_100G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw22 --storagectl "SATA" --port 6 --device 0 --type hdd --medium asm6_GIMR_100G.vdi  --mtype shareable
    
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\DSCRACTW2\asm7_GIMR_100G.vdi --size 102400 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 8604878c-8c73-421a-b758-4ef5bf0a3d61
    M:\VM\DSCRACTW2>VBoxManage modifyhd  asm7_GIMR_100G.vdi  --type shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw21 --storagectl "SATA" --port 7 --device 0 --type hdd --medium asm7_GIMR_100G.vdi  --mtype shareable
    M:\VM\DSCRACTW2>VBoxManage storageattach dsctw22 --storagectl "SATA" --port 7 --device 0 --type hdd --medium asm7_GIMR_100G.vdi  --mtype shareable
    
    Create and Attach the ORACLE_BASE disks - each VM gets its own ORACLE_BASE disk
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\DSCRACTW2\dsctw21_OBASE_120G.vdi --size 122800 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 35ab9546-2967-4f43-9a52-305906ff24e1
    M:\VM\DSCRACTW2>VBoxManage createhd --filename M:\VM\DSCRACTW2\dsctw22_OBASE_120G.vdi --size 122800 --format VDI --variant Fixed
    0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
    Medium created. UUID: 32e1fcaa-9609-4027-968e-2d35d33584a8
    
    M:\VM\DSCRACTW2> VBoxManage storageattach dsctw21 --storagectl "SATA" --port 8 --device 0 --type hdd --medium dsctw21_OBASE_120G.vdi 
    M:\VM\DSCRACTW2> VBoxManage storageattach dsctw22 --storagectl "SATA" --port 8 --device 0 --type hdd --medium dsctw22_OBASE_120G.vdi 
    
     You may use parted to partition the new disk, create an XFS file system and mount it; a minimal sketch follows. 
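     A minimal sketch of those steps, assuming the new ORACLE_BASE disk shows up as /dev/sdi (as in the df
     output below) and is mounted under /u01/app/grid; adapt the device name and mount point to your VM:

     [root@dsctw21 ~]# parted -s /dev/sdi mklabel msdos
     [root@dsctw21 ~]# parted -s /dev/sdi mkpart primary xfs 1MiB 100%
     [root@dsctw21 ~]# mkfs.xfs /dev/sdi1
     [root@dsctw21 ~]# mkdir -p /u01/app/grid
     [root@dsctw21 ~]# mount /dev/sdi1 /u01/app/grid
     [root@dsctw21 ~]# echo "/dev/sdi1 /u01/app/grid xfs defaults 0 0" >> /etc/fstab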
    
    The Linux XFS file systems should NOW look like the following 
    [root@dsctw21 app]# df / /u01 /u01/app/grid
    Filesystem                  1K-blocks    Used Available Use% Mounted on
    /dev/mapper/ol_ractw21-root  15718400 9085996   6632404  58% /
    /dev/mapper/ol_ractw21-u01   15718400 7409732   8308668  48% /u01
    /dev/sdi1                   125683756   32928 125650828   1% /u01/app/grid
    
    
    • See Chapter : Using parted to create a new ORACLE_BASE partition for a Domain Service Cluster in the following article

    Disk protections for our ASM disks

     • The disk label should be msdos
     • To allow the installation process to pick up the disks, set the following ownership and permissions (a sketch of the commands follows after the listing)
    Model: ATA VBOX HARDDISK (scsi)
    Disk /dev/sdb: 21.5GB
    Sector size (logical/physical): 512B/512B
    Partition Table: msdos
    Disk Flags: 
    ..
    brw-rw----. 1 grid asmadmin 8,  16 May  5 08:21 /dev/sdb
    brw-rw----. 1 grid asmadmin 8,  32 May  5 08:21 /dev/sdc
    brw-rw----. 1 grid asmadmin 8,  48 May  5 08:21 /dev/sdd
    brw-rw----. 1 grid asmadmin 8,  64 May  5 08:21 /dev/sde
    brw-rw----. 1 grid asmadmin 8,  80 May  5 08:21 /dev/sdf
    brw-rw----. 1 grid asmadmin 8,  96 May  5 08:21 /dev/sdg
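     A minimal sketch of how this setup could be produced, assuming the shared ASM disks appear as
     /dev/sdb .. /dev/sdg as in the listing above (note that plain chown/chmod does not survive a reboot;
     for a permanent setup you would normally use udev rules or AFD labeling):

     [root@dsctw21 ~]# for d in /dev/sd[b-g]
     > do
     >   parted -s $d mklabel msdos      # msdos disk label as required above
     >   chown grid:asmadmin $d          # ownership expected by the installer
     >   chmod 660 $d                    # rw for grid and asmadmin
     > done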
     • If you need to recover from a failed installation and the disks are already labeled by AFD please read:

    Start the installation process

    Unset the ORACLE_BASE environment variable.
    [grid@dsctw21 grid]$ unset ORACLE_BASE
    [grid@dsctw21 ~]$ cd $GRID_HOME
    [grid@dsctw21 grid]$ pwd
    /u01/app/122/grid
    [grid@dsctw21 grid]$ unzip -q  /media/sf_kits/Oracle/122/linuxx64_12201_grid_home.zip
     As root, allow X-Windows applications to run on this node from any host  
    [root@dsctw21 ~]# xhost +
    access control disabled, clients can connect from any host
    [grid@dsctw21 grid]$ export DISPLAY=:0.0
    
     If you are running a test environment with low memory resources [ <= 16 GByte ] don't forget to limit the GIMR memory requirements by reading: 
    
    Start of GIMR database fails during 12.2 installation
    
    Now start the Oracle Grid Infrastructure installer by running the following command:
    
    [grid@dsctw21 grid]$ ./gridSetup.sh
    Launching Oracle Grid Infrastructure Setup Wizard...
    
    

    Initial Installation Steps

     Run the required root scripts on the cluster nodes: 
    
    [root@dsctw22 app]# /u01/app/oraInventory/orainstRoot.sh 
    
     Run root.sh on the first RAC node:
    [root@dsctw21 ~]# /u01/app/122/grid/root.sh
    Performing root user operation.
    
    The following environment variables are set as:
        ORACLE_OWNER= grid
        ORACLE_HOME=  /u01/app/122/grid
    ...
    Now product-specific root actions will be performed.
    Relinking oracle with rac_on option
    Using configuration parameter file: /u01/app/122/grid/crs/install/crsconfig_params
    The log of current session can be found at:
      /u01/app/grid/crsdata/dsctw21/crsconfig/rootcrs_dsctw21_2017-05-04_12-22-04AM.log
    2017/05/04 12:22:07 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
    2017/05/04 12:22:07 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.
    2017/05/04 12:22:07 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
    2017/05/04 12:22:07 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
    2017/05/04 12:22:09 CLSRSC-363: User ignored prerequisites during installation
    2017/05/04 12:22:09 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
    2017/05/04 12:22:11 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
    2017/05/04 12:22:12 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.
    2017/05/04 12:22:13 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.
    2017/05/04 12:22:16 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.
    2017/05/04 12:22:16 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.
    2017/05/04 12:22:18 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
    2017/05/04 12:22:19 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
    2017/05/04 12:22:19 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
    2017/05/04 12:22:20 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
    2017/05/04 12:22:21 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
    2017/05/04 12:22:23 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
    2017/05/04 12:22:24 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
    2017/05/04 12:22:28 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
    CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'dsctw21'
    CRS-2673: Attempting to stop 'ora.ctssd' on 'dsctw21'
    
    ....
    CRS-2676: Start of 'ora.diskmon' on 'dsctw21' succeeded
    CRS-2676: Start of 'ora.cssd' on 'dsctw21' succeeded
    
    Disk label(s) created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-170504PM122337.log for details.
    Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-170504PM122337.log for details.
    
    2017/05/04 12:24:28 CLSRSC-482: Running command: '/u01/app/122/grid/bin/ocrconfig -upgrade grid oinstall'
    CRS-2672: Attempting to start 'ora.crf' on 'dsctw21'
    CRS-2672: Attempting to start 'ora.storage' on 'dsctw21'
    CRS-2676: Start of 'ora.storage' on 'dsctw21' succeeded
    CRS-2676: Start of 'ora.crf' on 'dsctw21' succeeded
    CRS-2672: Attempting to start 'ora.crsd' on 'dsctw21'
    CRS-2676: Start of 'ora.crsd' on 'dsctw21' succeeded
    CRS-4256: Updating the profile
    Successful addition of voting disk c397468902ba4f76bf99287b7e8b1e91.
    Successful addition of voting disk fbb3600816064f02bf3066783b703f6d.
    Successful addition of voting disk f5dec135cf474f56bf3a69bdba629daf.
    Successfully replaced voting disk group with +DATA.
    CRS-4256: Updating the profile
    CRS-4266: Voting file(s) successfully replaced
    ##  STATE    File Universal Id                File Name Disk group
    --  -----    -----------------                --------- ---------
     1. ONLINE   c397468902ba4f76bf99287b7e8b1e91 (AFD:DATA1) [DATA]
     2. ONLINE   fbb3600816064f02bf3066783b703f6d (AFD:DATA2) [DATA]
     3. ONLINE   f5dec135cf474f56bf3a69bdba629daf (AFD:DATA3) [DATA]
    Located 3 voting disk(s).
    CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'dsctw21'
    CRS-2673: Attempting to stop 'ora.crsd' on 'dsctw21'
    ..'
    CRS-2677: Stop of 'ora.driver.afd' on 'dsctw21' succeeded
    CRS-2677: Stop of 'ora.gipcd' on 'dsctw21' succeeded
    CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'dsctw21' has completed
    CRS-4133: Oracle High Availability Services has been stopped.
    2017/05/04 12:25:58 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
    CRS-4123: Starting Oracle High Availability Services-managed resources
    ..
    CRS-2676: Start of 'ora.crsd' on 'dsctw21' succeeded
    CRS-6023: Starting Oracle Cluster Ready Services-managed resources
    CRS-6017: Processing resource auto-start for servers: dsctw21
    CRS-6016: Resource auto-start has completed for server dsctw21
    CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources
    CRS-4123: Oracle High Availability Services has been started.
    2017/05/04 12:28:36 CLSRSC-343: Successfully started Oracle Clusterware stack
    2017/05/04 12:28:36 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
    CRS-2672: Attempting to start 'ora.net1.network' on 'dsctw21'
    CRS-2676: Start of 'ora.net1.network' on 'dsctw21' succeeded
    ..
    CRS-2676: Start of 'ora.DATA.dg' on 'dsctw21' succeeded
    2017/05/04 12:31:44 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
    
    Disk label(s) created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-170504PM123151.log for details.
    2017/05/04 12:38:07 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
    
    Run root.sh on the second Node:
    [root@dsctw22 app]# /u01/app/122/grid/root.sh
    Performing root user operation.
    ..
    2017/05/04 12:47:44 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
    2017/05/04 12:47:54 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
    2017/05/04 12:48:19 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
    
     After all root scripts have finished, continue the installation process !
    
     • After the GIMR database has been created and the installation process has run a final (hopefully successful) cluvfy, verify your installation logs :
    Install Logs Location 
     /u01/app/oraInventory/logs/GridSetupActions2017-05-05_02-24-23PM/gridSetupActions2017-05-05_02-24-23PM.log

    Verify Domain Service Cluster setup using cluvfy

    [grid@dsctw21 ~]$ cluvfy stage -post crsinst -n dsctw21,dsctw22
    
    Verifying Node Connectivity ...
      Verifying Hosts File ...PASSED
      Verifying Check that maximum (MTU) size packet goes through subnet ...PASSED
      Verifying subnet mask consistency for subnet "192.168.2.0" ...PASSED
      Verifying subnet mask consistency for subnet "192.168.5.0" ...PASSED
    Verifying Node Connectivity ...PASSED
    Verifying Multicast check ...PASSED
    Verifying ASM filter driver configuration consistency ...PASSED
    Verifying Time zone consistency ...PASSED
    Verifying Cluster Manager Integrity ...PASSED
    Verifying User Mask ...PASSED
    Verifying Cluster Integrity ...PASSED
    Verifying OCR Integrity ...PASSED
    Verifying CRS Integrity ...
      Verifying Clusterware Version Consistency ...PASSED
    Verifying CRS Integrity ...PASSED
    Verifying Node Application Existence ...PASSED
    Verifying Single Client Access Name (SCAN) ...
      Verifying DNS/NIS name service 'dsctw2-scan.dsctw2.dsctw2.example.com' ...
        Verifying Name Service Switch Configuration File Integrity ...PASSED
      Verifying DNS/NIS name service 'dsctw2-scan.dsctw2.dsctw2.example.com' ...PASSED
    Verifying Single Client Access Name (SCAN) ...PASSED
    Verifying OLR Integrity ...PASSED
    Verifying GNS Integrity ...
      Verifying subdomain is a valid name ...PASSED
      Verifying GNS VIP belongs to the public network ...PASSED
      Verifying GNS VIP is a valid address ...PASSED
      Verifying name resolution for GNS sub domain qualified names ...PASSED
      Verifying GNS resource ...PASSED
      Verifying GNS VIP resource ...PASSED
    Verifying GNS Integrity ...PASSED
    Verifying Voting Disk ...PASSED
    Verifying ASM Integrity ...
      Verifying Node Connectivity ...
        Verifying Hosts File ...PASSED
        Verifying Check that maximum (MTU) size packet goes through subnet ...PASSED
        Verifying subnet mask consistency for subnet "192.168.2.0" ...PASSED
        Verifying subnet mask consistency for subnet "192.168.5.0" ...PASSED
      Verifying Node Connectivity ...PASSED
    Verifying ASM Integrity ...PASSED
    Verifying Device Checks for ASM ...PASSED
    Verifying ASM disk group free space ...PASSED
    Verifying I/O scheduler ...
      Verifying Package: cvuqdisk-1.0.10-1 ...PASSED
    Verifying I/O scheduler ...PASSED
    Verifying User Not In Group "root": grid ...PASSED
    Verifying Clock Synchronization ...
    CTSS is in Observer state. Switching over to clock synchronization checks using NTP
    
      Verifying Network Time Protocol (NTP) ...
        Verifying '/etc/chrony.conf' ...PASSED
        Verifying '/var/run/chronyd.pid' ...PASSED
        Verifying Daemon 'chronyd' ...PASSED
        Verifying NTP daemon or service using UDP port 123 ...PASSED
        Verifying chrony daemon is synchronized with at least one external time source ...PASSED
      Verifying Network Time Protocol (NTP) ...PASSED
    Verifying Clock Synchronization ...PASSED
    Verifying Network configuration consistency checks ...PASSED
    Verifying File system mount options for path GI_HOME ...PASSED
    
    Post-check for cluster services setup was successful. 
    
    CVU operation performed:      stage -post crsinst
    Date:                         May 7, 2017 10:10:04 AM
    CVU home:                     /u01/app/122/grid/
    User:                         grid

    Check cluster Resources used by DSC

    [root@dsctw21 ~]# crs
    *****  Local Resources: *****
     Resource NAME                  TARGET     STATE           SERVER       STATE_DETAILS                       
    -------------------------      ---------- ----------      ------------ ------------------                  
    ora.ASMNET1LSNR_ASM.lsnr       ONLINE     ONLINE          dsctw21      STABLE   
    ora.DATA.dg                    ONLINE     ONLINE          dsctw21      STABLE   
    ora.LISTENER.lsnr              ONLINE     ONLINE          dsctw21      STABLE   
    ora.MGMT.GHCHKPT.advm          ONLINE     ONLINE          dsctw21      STABLE   
    ora.MGMT.dg                    ONLINE     ONLINE          dsctw21      STABLE   
    ora.chad                       ONLINE     ONLINE          dsctw21      STABLE   
    ora.helper                     ONLINE     ONLINE          dsctw21      IDLE,STABLE   
    ora.mgmt.ghchkpt.acfs          ONLINE     ONLINE          dsctw21      mounted on /mnt/oracle/rhpimages/chkbase,STABLE
    ora.net1.network               ONLINE     ONLINE          dsctw21      STABLE   
    ora.ons                        ONLINE     ONLINE          dsctw21      STABLE   
    ora.proxy_advm                 ONLINE     ONLINE          dsctw21      STABLE   
    *****  Cluster Resources: *****
    Resource NAME               INST   TARGET       STATE        SERVER          STATE_DETAILS
    --------------------------- ----   ------------ ------------ --------------- -----------------------------------------
    ora.LISTENER_SCAN1.lsnr        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.LISTENER_SCAN2.lsnr        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.LISTENER_SCAN3.lsnr        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.MGMTLSNR                   1   ONLINE       ONLINE       dsctw21         169.254.108.231 192. 168.2.151,STABLE
    ora.asm                        1   ONLINE       ONLINE       dsctw21         Started,STABLE  
    ora.asm                        2   ONLINE       OFFLINE      -               STABLE  
    ora.asm                        3   OFFLINE      OFFLINE      -               STABLE  
    ora.cvu                        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.dsctw21.vip                1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.dsctw22.vip                1   ONLINE       INTERMEDIATE dsctw21         FAILED OVER,STABLE 
    ora.gns                        1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.gns.vip                    1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.ioserver                   1   ONLINE       OFFLINE      -               STABLE  
    ora.ioserver                   2   ONLINE       ONLINE       dsctw21         STABLE  
    ora.ioserver                   3   ONLINE       OFFLINE      -               STABLE  
    ora.mgmtdb                     1   ONLINE       ONLINE       dsctw21         Open,STABLE  
    ora.qosmserver                 1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.rhpserver                  1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.scan1.vip                  1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.scan2.vip                  1   ONLINE       ONLINE       dsctw21         STABLE  
    ora.scan3.vip                  1   ONLINE       ONLINE       dsctw21         STABLE

     The following resources should be ONLINE for a DSC cluster

    -> ioserver
    -> mgmtdb
    -> rhpserver
     • If any of these resources are not ONLINE, try to start them with srvctl (see the commands below)
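     For example (srvctl start ioserver also appears later in this article; treat the list as a sketch and
     check the 12.2 srvctl reference for the exact options of your release):

     [grid@dsctw21 ~]$ srvctl start ioserver
     [grid@dsctw21 ~]$ srvctl start mgmtdb
     [grid@dsctw21 ~]$ srvctl start rhpserver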

     Verify Domain Service Cluster setup using srvctl, rhpctl and asmcmd

    [grid@dsctw21 peer]$ rhpctl query server
    Rapid Home Provisioning Server (RHPS): dsctw2
    Storage base path: /mnt/oracle/rhpimages
    Disk Groups: MGMT
    Port number: 23795
     [grid@dsctw21 peer]$ rhpctl query workingcopy
    No software home has been configured
    
    [grid@dsctw21 peer]$ rhpctl query image
    No image has been configured
    
    Check ASM disk groups
    [grid@dsctw21 peer]$ asmcmd lsdg
    State    Type    Rebal  Sector  Logical_Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
    MOUNTED  NORMAL  N         512             512   4096  4194304     81920    81028            20480           30274              0             Y  DATA/
    MOUNTED  EXTERN  N         512             512   4096  4194304    307200   265376                0          265376              0             N  MGMT/
    
    Verify GNS
    [grid@dsctw21 peer]$  srvctl config gns
    GNS is enabled.
    GNS VIP addresses: 192.168.5.60
    Domain served by GNS: dsctw2.example.com
    
    [grid@dsctw21 peer]$  srvctl config gns -list
    dsctw2.example.com DLV 50343 10 18 ( zfiaA8U30oiGSATInCdyN7pIKf1ZIVQhHsF6OQti9bvXw7dUhNmDv/txClkHX6BjkLTBbPyWGdRjEMf+uUqYHA== ) Unique Flags: 0x314
    dsctw2.example.com DNSKEY 7 3 10 ( MIIBCgKCAQEAmxQnG2xkpQMXGRXD2tBTZkUKYUsV+Sj/w6YmpFdpMQVoNVSXJCWgCDqIjLrfVA2AQUeEaAek6pfOlMp6Tev2nPVvNqPpul5Fs63cFVzwjdTI4zU6lSC6+2UVJnAN6BTEmrOzKKt/kuxoNNI7V4DZ5Nj6UoUJ2MXGr/+RSU44GboHnrftvFaVN8pp0TOoOBTj5hHH8C73I+lFfDNhMXEY8WQhb1nP6Cv02qPMsbb8edq1Dy8lt6N6kzjh+9hKPNdqM7HB3OVV5L18E5HtLjWOhMZLqJ7oDTDsQcMMuYmfFjbi3JvGQrdTlGHAv9f4W/vRL/KV8bDkDFnSRSFubxsbdQIDAQAB ) Unique Flags: 0x314
    dsctw2.example.com NSEC3PARAM 10 0 2 ( jvm6kO+qyv65ztXFy53Dkw== ) Unique Flags: 0x314
    dsctw2-scan.dsctw2 A 192.168.5.231 Unique Flags: 0x1
    dsctw2-scan.dsctw2 A 192.168.5.234 Unique Flags: 0x1
    dsctw2-scan.dsctw2 A 192.168.5.235 Unique Flags: 0x1
    dsctw2-scan1-vip.dsctw2 A 192.168.5.231 Unique Flags: 0x1
    dsctw2-scan2-vip.dsctw2 A 192.168.5.235 Unique Flags: 0x1
    dsctw2-scan3-vip.dsctw2 A 192.168.5.234 Unique Flags: 0x1
    
    [grid@dsctw21 peer]$ nslookup dsctw2-scan.dsctw2.example.com
    Server:        192.168.5.50
    Address:    192.168.5.50#53
    
    Non-authoritative answer:
    Name:    dsctw2-scan.dsctw2.example.com
    Address: 192.168.5.234
    Name:    dsctw2-scan.dsctw2.example.com
    Address: 192.168.5.231
    Name:    dsctw2-scan.dsctw2.example.com
    Address: 192.168.5.235
    
    
    Verify Management Repository
    [grid@dsctw21 peer]$ oclumon manage -get MASTER
    Master = dsctw21
    
    [grid@dsctw21 peer]$ srvctl status mgmtdb 
    Database is enabled
    Instance -MGMTDB is running on node dsctw21
    [grid@dsctw21 peer]$ srvctl config mgmtdb
    Database unique name: _mgmtdb
    Database name: 
    Oracle home: <CRS home>
    Oracle user: grid
    Spfile: +MGMT/_MGMTDB/PARAMETERFILE/spfile.272.943198901
    Password file: 
    Domain: 
    Start options: open
    Stop options: immediate
    Database role: PRIMARY
    Management policy: AUTOMATIC
    Type: Management
    PDB name: GIMR_DSCREP_10
    PDB service: GIMR_DSCREP_10
    Cluster name: dsctw2
    Database instance: -MGMTDB
    
     --> PDB name and service name GIMR_DSCREP_10 are NEW with 12.2.
         With lower versions you get the cluster name here !
    
    
    
    [grid@dsctw21 peer]$  oclumon manage -get reppath
    CHM Repository Path = +MGMT/_MGMTDB/4EC81829D5715AD0E0539705A8C084C6/DATAFILE/sysmgmtdata.280.943199159
    [grid@dsctw21 peer]$ asmcmd  ls -ls +MGMT/_MGMTDB/4EC81829D5715AD0E0539705A8C084C6/DATAFILE/sysmgmtdata.280.943199159
    Type      Redund  Striped  Time             Sys  Block_Size  Blocks       Bytes       Space  Name
    DATAFILE  UNPROT  COARSE   MAY 05 17:00:00  Y          8192  262145  2147491840  2155872256  sysmgmtdata.280.943199159
    
    [grid@dsctw21 peer]$  oclumon dumpnodeview -allnodes
    ----------------------------------------
    Node: dsctw21 Clock: '2017-05-06 09.29.55+0200' SerialNo:4469 
    ----------------------------------------
    SYSTEM:
    #pcpus: 1 #cores: 4 #vcpus: 4 cpuht: N chipname: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz cpuusage: 26.48 cpusystem: 2.78 cpuuser: 23.70 cpunice: 0.00 cpuiowait: 0.05 cpusteal: 0.00 cpuq: 0 physmemfree: 695636 physmemtotal: 6708204 mcache: 2800060 swapfree: 7202032 swaptotal: 8257532 hugepagetotal: 0 hugepagefree: 0 hugepagesize: 2048 ior: 311 iow: 229 ios: 92 swpin: 0 swpout: 0 pgin: 3 pgout: 40 netr: 32.601 netw: 27.318 procs: 479 procsoncpu: 3 #procs_blocked: 0 rtprocs: 17 rtprocsoncpu: N/A #fds: 34496 #sysfdlimit: 6815744 #disks: 14 #nics: 3 loadavg1: 2.24 loadavg5: 1.99 loadavg15: 1.89 nicErrors: 0
    TOP CONSUMERS:
    topcpu: 'gnome-shell(6512) 5.00' topprivmem: 'java(660) 347292' topshm: 'mdb_dbw0_-MGMTDB(28946) 352344' topfd: 'ocssd.bin(6204) 370' topthread: 'crsd.bin(8615) 52' 
    
    ----------------------------------------
    Node: dsctw22 Clock: '2017-05-06 09.29.55+0200' SerialNo:3612 
    ----------------------------------------
    SYSTEM:
    #pcpus: 1 #cores: 4 #vcpus: 4 cpuht: N chipname: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz cpuusage: 1.70 cpusystem: 0.77 cpuuser: 0.92 cpunice: 0.00 cpuiowait: 0.00 cpusteal: 0.00 cpuq: 0 physmemfree: 828740 physmemtotal: 5700592 mcache: 2588336 swapfree: 8244596 swaptotal: 8257532 hugepagetotal: 0 hugepagefree: 0 hugepagesize: 2048 ior: 2 iow: 68 ios: 19 swpin: 0 swpout: 0 pgin: 0 pgout: 63 netr: 10.747 netw: 18.222 procs: 376 procsoncpu: 1 #procs_blocked: 0 rtprocs: 15 rtprocsoncpu: N/A #fds: 29120 #sysfdlimit: 6815744 #disks: 14 #nics: 3 loadavg1: 1.44 loadavg5: 1.39 loadavg15: 1.43 nicErrors: 0
    TOP CONSUMERS:
    topcpu: 'orarootagent.bi(7345) 1.20' topprivmem: 'java(8936) 270140' topshm: 'ocssd.bin(5833) 119060' topfd: 'gnsd.bin(9072) 1242' topthread: 'crsd.bin(7137) 49' 
    
    
    Verify TFA status
    [grid@dsctw21 peer]$ tfactl print status
    TFA-00099: Printing status of TFA
    
    .-----------------------------------------------------------------------------------------------.
    | Host    | Status of TFA | PID   | Port | Version    | Build ID             | Inventory Status |
    +---------+---------------+-------+------+------------+----------------------+------------------+
    | dsctw21 | RUNNING       | 32084 | 5000 | 12.2.1.0.0 | 12210020161122170355 | COMPLETE         |
    | dsctw22 | RUNNING       |  3929 | 5000 | 12.2.1.0.0 | 12210020161122170355 | COMPLETE         |
    '---------+---------------+-------+------+------------+----------------------+------------------'
    
    [grid@dsctw21 peer]$ tfactl print config
    .------------------------------------------------------------------------------------.
    |                                       dsctw21                                      |
    +-----------------------------------------------------------------------+------------+
    | Configuration Parameter                                               | Value      |
    +-----------------------------------------------------------------------+------------+
    | TFA Version                                                           | 12.2.1.0.0 |
    | Java Version                                                          | 1.8        |
    | Public IP Network                                                     | true       |
    | Automatic Diagnostic Collection                                       | true       |
    | Alert Log Scan                                                        | true       |
    | Disk Usage Monitor                                                    | true       |
    | Managelogs Auto Purge                                                 | false      |
    | Trimming of files during diagcollection                               | true       |
    | Inventory Trace level                                                 | 1          |
    | Collection Trace level                                                | 1          |
    | Scan Trace level                                                      | 1          |
    | Other Trace level                                                     | 1          |
    | Repository current size (MB)                                          | 13         |
    | Repository maximum size (MB)                                          | 10240      |
    | Max Size of TFA Log (MB)                                              | 50         |
    | Max Number of TFA Logs                                                | 10         |
    | Max Size of Core File (MB)                                            | 20         |
    | Max Collection Size of Core Files (MB)                                | 200        |
    | Minimum Free Space to enable Alert Log Scan (MB)                      | 500        |
    | Time interval between consecutive Disk Usage Snapshot(minutes)        | 60         |
    | Time interval between consecutive Managelogs Auto Purge(minutes)      | 60         |
    | Logs older than the time period will be auto purged(days[d]|hours[h]) | 30d        |
    | Automatic Purging                                                     | true       |
    | Age of Purging Collections (Hours)                                    | 12         |
    | TFA IPS Pool Size                                                     | 5          |
    '-----------------------------------------------------------------------+------------'
    
    .------------------------------------------------------------------------------------.
    |                                       dsctw22                                      |
    +-----------------------------------------------------------------------+------------+
    | Configuration Parameter                                               | Value      |
    +-----------------------------------------------------------------------+------------+
    | TFA Version                                                           | 12.2.1.0.0 |
    | Java Version                                                          | 1.8        |
    | Public IP Network                                                     | true       |
    | Automatic Diagnostic Collection                                       | true       |
    | Alert Log Scan                                                        | true       |
    | Disk Usage Monitor                                                    | true       |
    | Managelogs Auto Purge                                                 | false      |
    | Trimming of files during diagcollection                               | true       |
    | Inventory Trace level                                                 | 1          |
    | Collection Trace level                                                | 1          |
    | Scan Trace level                                                      | 1          |
    | Other Trace level                                                     | 1          |
    | Repository current size (MB)                                          | 0          |
    | Repository maximum size (MB)                                          | 10240      |
    | Max Size of TFA Log (MB)                                              | 50         |
    | Max Number of TFA Logs                                                | 10         |
    | Max Size of Core File (MB)                                            | 20         |
    | Max Collection Size of Core Files (MB)                                | 200        |
    | Minimum Free Space to enable Alert Log Scan (MB)                      | 500        |
    | Time interval between consecutive Disk Usage Snapshot(minutes)        | 60         |
    | Time interval between consecutive Managelogs Auto Purge(minutes)      | 60         |
    | Logs older than the time period will be auto purged(days[d]|hours[h]) | 30d        |
    | Automatic Purging                                                     | true       |
    | Age of Purging Collections (Hours)                                    | 12         |
    | TFA IPS Pool Size                                                     | 5          |
    '-----------------------------------------------------------------------+------------'
    
    [grid@dsctw21 peer]$ tfactl  print  actions
    .-----------------------------------------------------------.
    | HOST | START TIME | END TIME | ACTION | STATUS | COMMENTS |
    +------+------------+----------+--------+--------+----------+
    '------+------------+----------+--------+--------+----------'
    
    [grid@dsctw21 peer]$ tfactl print errors 
    Total Errors found in database: 0
    DONE
    
    [grid@dsctw21 peer]$  tfactl print startups
    ++++++ Startup Start +++++
    Event Id     : nullfom14v2mu0u82nkf5uufjoiuia
    File Name    : /u01/app/grid/diag/apx/+apx/+APX1/trace/alert_+APX1.log
    Startup Time : Fri May 05 15:07:03 CEST 2017
    Dummy        : FALSE
    ++++++ Startup End +++++
    ++++++ Startup Start +++++
    Event Id     : nullgp6ei43ke5qeqo8ugemsdqrle1
    File Name    : /u01/app/grid/diag/asm/+asm/+ASM1/trace/alert_+ASM1.log
    Startup Time : Fri May 05 14:58:28 CEST 2017
    Dummy        : FALSE
    ++++++ Startup End +++++
    ++++++ Startup Start +++++
    Event Id     : nullt7p1681pjq48qt17p4f8odrrgf
    File Name    : /u01/app/grid/diag/rdbms/_mgmtdb/-MGMTDB/trace/alert_-MGMTDB.log
    Startup Time : Fri May 05 15:27:13 CEST 2017
    Dummy        : FALSE
    ++++++ Startup End +++++
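
    When you only care about one of the parameters shown in the node tables above, the full tfactl print config dump can simply be filtered; a minimal sketch (parameter name taken from the tables above, grep pattern is just an example):

    # show only the node banners and the Automatic Purging setting for both nodes
    tfactl print config | grep -E "dsctw2[12]|Automatic Purging"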
    
    
    

    Potential Error: ORA-00845 when starting IOServer Instances

     
    [grid@dsctw21 ~]$ srvctl start ioserver
    PRCR-1079 : Failed to start resource ora.ioserver
    CRS-5017: The resource action "ora.ioserver start" encountered the following error: 
    ORA-00845: MEMORY_TARGET not supported on this system
    . For details refer to "(:CLSN00107:)" in "/u01/app/grid/diag/crs/dsctw22/crs/trace/crsd_oraagent_grid.trc".
    
    CRS-2674: Start of 'ora.ioserver' on 'dsctw22' failed
    CRS-5017: The resource action "ora.ioserver start" encountered the following error: 
    ORA-00845: MEMORY_TARGET not supported on this system
    . For details refer to "(:CLSN00107:)" in "/u01/app/grid/diag/crs/dsctw21/crs/trace/crsd_oraagent_grid.trc".
    
    CRS-2674: Start of 'ora.ioserver' on 'dsctw21' failed
    CRS-2632: There are no more servers to try to place resource 'ora.ioserver' on that would satisfy its placement policy
    
    From the +IOS1 alert.log: ./diag/ios/+ios/+IOS1/trace/alert_+IOS1.log
    
    WARNING: You are trying to use the MEMORY_TARGET feature. This feature requires the /dev/shm file system to be mounted for at least 4513071104 bytes. /dev/shm is either not mounted or is mounted with available space less than this size. Please fix this so that MEMORY_TARGET can work as expected. Current available is 2117439488 and used is 1317158912 bytes. Ensure that the mount point is /dev/shm for this directory.
    
    Verify /dev/shm
    [root@dsctw22 ~]# df -h /dev/shm
    Filesystem      Size  Used Avail Use% Mounted on
    tmpfs           2.8G  1.3G  1.5G  46% /dev/shm
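
    The warning above asks for at least 4513071104 bytes of /dev/shm (roughly 4.3 GB), while only about 2.8 GB is configured. A minimal sketch of that comparison, using the required value from the alert.log and plain df/awk:

    # required size taken from the MEMORY_TARGET warning in alert_+IOS1.log
    REQUIRED=4513071104
    CONFIGURED=$(df -B1 /dev/shm | awk 'NR==2 {print $2}')
    echo "MEMORY_TARGET needs $REQUIRED bytes, /dev/shm provides $CONFIGURED bytes"
    if [ "$CONFIGURED" -lt "$REQUIRED" ]; then
        echo "/dev/shm is too small - enlarge it as shown below"
    fi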
    
    
    Modify /etc/fstab 
    # /etc/fstab
    # Created by anaconda on Tue Apr  4 12:13:16 2017
    #
    #
    tmpfs                                           /dev/shm                tmpfs   defaults,size=6g  0 0 
    
    This increases /dev/shm to 6 GB. Then remount the tmpfs filesystem:
    [root@dsctw22 ~]# mount -o remount tmpfs 
    [root@dsctw22 ~]# df -h /dev/shm
    Filesystem      Size  Used Avail Use% Mounted on
    tmpfs           6.0G  1.3G  4.8G  21% /dev/shm
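
    Once /dev/shm is large enough on every node (the fstab change and remount have to be repeated on dsctw21 as well), the IOServer resource can be retried. A minimal sketch, reusing the srvctl command that failed above and assuming srvctl status ioserver is available in this release:

    # as the grid user, after /dev/shm has been enlarged on all nodes
    srvctl start ioserver
    srvctl status ioserver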

    Do a silent installation

    From Grid Infrastructure Installation and Upgrade Guide
    A.7.2 Running Postinstallation Configuration Using Response File
    
    Complete this procedure to run configuration assistants with the executeConfigTools command.
    
    Edit the response file and specify the required passwords for your configuration. 
    You can use the response file created during installation, located at $ORACLE_HOME/install/response/product_timestamp.rsp. 
    
    [root@dsctw21 ~]# ls -l $ORACLE_HOME/install/response/
    total 112
    -rw-r--r--. 1 grid oinstall 34357 Jan 26 17:10 grid_2017-01-26_04-10-28PM.rsp
    -rw-r--r--. 1 grid oinstall 35599 May 23 15:50 grid_2017-05-22_04-51-05PM.rsp
    
    Verify the password settings for Oracle Grid Infrastructure:
    [root@dsctw21 ~]# cd  $ORACLE_HOME/install/response/
    [root@dsctw21 response]#  grep -i passw grid_2017-05-22_04-51-05PM.rsp
    # Password for SYS user of Oracle ASM
    oracle.install.asm.SYSASMPassword=sys
    # Password for ASMSNMP account
    oracle.install.asm.monitorPassword=sys
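
    If the passwords need to be set non-interactively, the two properties shown above can be edited in place; a minimal sketch (placeholder password, file name from the listing above, not verified against the installer):

    # back up the response file, then set both ASM passwords (placeholder value)
    cd $ORACLE_HOME/install/response
    cp grid_2017-05-22_04-51-05PM.rsp grid_2017-05-22_04-51-05PM.rsp.bak
    sed -i 's/^oracle.install.asm.SYSASMPassword=.*/oracle.install.asm.SYSASMPassword=MySecretPw1/' grid_2017-05-22_04-51-05PM.rsp
    sed -i 's/^oracle.install.asm.monitorPassword=.*/oracle.install.asm.monitorPassword=MySecretPw1/' grid_2017-05-22_04-51-05PM.rsp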
    
    I have not verified this, but it seems that not setting these passwords could lead to the following errors during Member Cluster setup:
    [INS-30211] An unexpected exception occurred while extracting details from ASM client data
           PRCI-1167 : failed to extract atttributes from the specified file "/home/grid/FILES/mclu2.xml"
           PRCT-1453 : failed to get ASM properties from ASM client data file /home/grid/FILES/mclu2.xml
           KFOD-00319: failed to read the credential file /home/grid/FILES/mclu2.xml 
    
    [grid@dsctw21 grid]$ gridSetup.sh -silent  -skipPrereqs -responseFile grid_2017-05-22_04-51-05PM.rsp  
    Launching Oracle Grid Infrastructure Setup Wizard...
    ..
    You can find the log of this install session at:
     /u01/app/oraInventory/logs/GridSetupActions2017-05-20_12-17-29PM/gridSetupActions2017-05-20_12-17-29PM.log
    
    As a root user, execute the following script(s):
        1. /u01/app/oraInventory/orainstRoot.sh
        2. /u01/app/122/grid/root.sh
    
    Execute /u01/app/oraInventory/orainstRoot.sh on the following nodes: 
    [dsctw22]
    Execute /u01/app/122/grid/root.sh on the following nodes: 
    [dsctw21, dsctw22]
    
    Run the script on the local node first. After successful completion, you can start the script in parallel on all other nodes.
    
    Successfully Setup Software.
    As install user, execute the following command to complete the configuration.
        /u01/app/122/grid/gridSetup.sh -executeConfigTools -responseFile /home/grid/grid_dsctw2.rsp [-silent]
    
    -> Run the root.sh scripts in the order sketched below, then execute the configuration tools:
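
    A minimal sketch of that sequence, based on the installer instructions above and assuming root can ssh to dsctw22 (otherwise log in there directly):

    # as root on dsctw21 (local node first), then on dsctw22
    ssh root@dsctw22 /u01/app/oraInventory/orainstRoot.sh
    /u01/app/122/grid/root.sh
    ssh root@dsctw22 /u01/app/122/grid/root.sh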
    
    [grid@dsctw21 grid]$ /u01/app/122/grid/gridSetup.sh -executeConfigTools -responseFile grid_2017-05-22_04-51-05PM.rsp  
    Launching Oracle Grid Infrastructure Setup Wizard...
    
    You can find the logs of this session at:
    /u01/app/oraInventory/logs/GridSetupActions2017-05-20_05-34-08PM
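
    After the configuration tools finish, a quick sanity check of the stack on both nodes does not hurt; these are standard clusterware commands, not part of the original transcript:

    # verify CRS is healthy on all nodes and list the resource states
    crsctl check cluster -all
    crsctl stat res -t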

    Backup OCR and export GNS

    • Note: as the Member Cluster install has killed my shared GNS twice, it may be a good idea to back up the OCR and export the GNS right now
    Back up the OCR:
    
    [root@dsctw21 cfgtoollogs]# ocrconfig -manualbackup
    dsctw21     2017/05/22 19:03:53     +MGMT:/dsctw/OCRBACKUP/backup_20170522_190353.ocr.284.944679833     0   
      
    [root@dsctw21 cfgtoollogs]# ocrconfig -showbackup
    PROT-24: Auto backups for the Oracle Cluster Registry are not available
    dsctw21     2017/05/22 19:03:53     +MGMT:/dsctw/OCRBACKUP/backup_20170522_190353.ocr.284.944679833     0   
       
    Locate all OCR backups 
    ASMCMD>  find --type OCRBACKUP / *
    +MGMT/dsctw/OCRBACKUP/backup_20170522_190353.ocr.284.944679833
    ASMCMD> ls -l +MGMT/dsctw/OCRBACKUP/backup_20170522_190353.ocr.284.944679833
    Type       Redund  Striped  Time             Sys  Name
    OCRBACKUP  UNPROT  COARSE   MAY 22 19:00:00  Y    backup_20170522_190353.ocr.284.944679833
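
    Since the manual OCR backup lives inside the +MGMT disk group, it may be worth copying it out of ASM onto a regular file system as well; a minimal sketch using asmcmd cp (the destination path is just an example):

    # as the grid user, copy the OCR backup out of ASM to local disk
    asmcmd cp +MGMT/dsctw/OCRBACKUP/backup_20170522_190353.ocr.284.944679833 /u01/app/grid/backup_20170522_190353.ocr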
    
    Export the GNS to a file
    [root@dsctw21 cfgtoollogs]# srvctl stop gns
    [root@dsctw21 cfgtoollogs]# srvctl export gns -instance /root/dsc-gns.export 
    [root@dsctw21 cfgtoollogs]# srvctl start gns
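
    Should the shared GNS get corrupted again, the exported file can be used to bring it back; a minimal sketch, assuming srvctl import gns -instance accepts the file created above:

    # restore GNS from the export taken above (stop it first)
    srvctl stop gns
    srvctl import gns -instance /root/dsc-gns.export
    srvctl start gns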
    
    Dump GNS data
    [root@dsctw21 cfgtoollogs]# srvctl config gns -list
    dsctw21.CLSFRAMEdsctw SRV Target: 192.168.2.151.dsctw Protocol: tcp Port: 12642 Weight: 0 Priority: 0 Flags: 0x101
    dsctw21.CLSFRAMEdsctw TXT NODE_ROLE="HUB", NODE_INCARNATION="0", NODE_TYPE="20" Flags: 0x101
    dsctw22.CLSFRAMEdsctw SRV Target: 192.168.2.152.dsctw Protocol: tcp Port: 35675 Weight: 0 Priority: 0 Flags: 0x101
    dsctw22.CLSFRAMEdsctw TXT NODE_ROLE="HUB", NODE_INCARNATION="0", NODE_TYPE="20" Flags: 0x101
    dscgrid.example.com DLV 35418 10 18 ( /a+Iu8QgPs9k96CoQ6rFVQrqmGFzZZNKRo952Ujjkj8dcDlHSA+JMcEMHLC3niuYrM/eFeAj3iFpihrIEohHXQ== ) Unique Flags: 0x314
    dscgrid.example.com DNSKEY 7 3 10 ( MIIBCgKCAQEAxnVyA60TYUeEKkNvEaWrAFg2oDXrFbR9Klx7M5N/UJadFtF8h1e32Bf8jpL6cq1yKRI3TVdrneuiag0OiQfzAycLjk98VUz+L3Q5AHGYCta62Kjaq4hZOFcgF/BCmyY+6tWMBE8wdivv3CttCiH1U7x3FUqbgCb1iq3vMcS6X64k3MduhRankFmfs7zkrRuWJhXHfRaDz0mNXREeW2VvPyThXPs+EOPehaDhXRmJBWjBkeZNIaBTiR8jKTTY1bSPzqErEqAYoH2lR4rAg9TVKjOkdGrAmJJ6AGvEBfalzo4CJtphAmygFd+/ItFm5koFb2ucFr1slTZz1HwlfdRVGwIDAQAB ) Unique Flags: 0x314
    dscgrid.example.com NSEC3PARAM 10 0 2 ( jvm6kO+qyv65ztXFy53Dkw== ) Unique Flags: 0x314
    dsctw-scan.dsctw A 192.168.5.225 Unique Flags: 0x81
    dsctw-scan.dsctw A 192.168.5.227 Unique Flags: 0x81
    dsctw-scan.dsctw A 192.168.5.232 Unique Flags: 0x81
    dsctw-scan1-vip.dsctw A 192.168.5.232 Unique Flags: 0x81
    dsctw-scan2-vip.dsctw A 192.168.5.227 Unique Flags: 0x81
    dsctw-scan3-vip.dsctw A 192.168.5.225 Unique Flags: 0x81
    dsctw21-vip.dsctw A 192.168.5.226 Unique Flags: 0x81
    dsctw22-vip.dsctw A 192.168.5.235 Unique Flags: 0x81
    dsctw-scan1-vip A 192.168.5.232 Unique Flags: 0x81
    dsctw-scan2-vip A 192.168.5.227 Unique Flags: 0x81
    dsctw-scan3-vip A 192.168.5.225 Unique Flags: 0x81
    dsctw21-vip A 192.168.5.226 Unique Flags: 0x81
    dsctw22-vip A 192.168.5.235 Unique Flags: 0x81
    
    
    

    Reference