Search This Blog

Sunday, December 20, 2015

Flashback Technology

fra = flash recovery area / fast recovery area
aum = automatic undo management
scn = system change number
da  = data archive

select log_mode, flashback_on from v$database;

FlashBack Database Procedure
1. Put DB in mount mode
2. RMAN -> Flashback to  -time, scn, restore point
3. Open DB with RESETLOGS
or
    Open DB Read Only for testing

show parameter retention;
undo_retention = 900 (15 min)
db_flashback_retention_target = 1440 (means 24 hours)

show parameter undo;

show parameter recovery;

make sure system is in archivelog mode.

select log_mode from v$database;

to change retention value

alter system set db_flashback_retention_target=2880;  (48 hours)


To set flashback on for the database:

select log_mode, flashback_on from v$database;

shutdown immediate;
startup mount;
alter database flashback on;

alter database open;

To confirm:
select log_mode, flashback_on from v$database;


Restore Points

create restore point rp1;

select scn, time, name from v$restore_point;


create restore point test guarantee flashback database;

drop restore point rp1;


To check the flashback window (how far back we can go to flashback our data)

select oldest_flashback_scn, oldest_flashback_time
from v$flashback_database_log;


Test Scenario

create table scott.t1 as select * from scott.emp;

select count(*) from scott.t1;

select current_scn from v$database;

create restore point emp1;

truncate table scott.t1;

select count(*) from scott.t1;

we get 0


Go to RMAN


$rman target /

shutdown immediate;
startup mount;
FLASHBACK DATABASE TO RESTORE POINT "EMP1";
sql 'alter database open read only';
or
alter database open resetlogs;

Now check the data in scott.t1 table.



Oracle total recall, flashback data archive

create tablespace tbs1
datafile '/u01/prod/tbs01.dbf'
size 100m
autoextend on
next 100m
maxsize 10G;

Now we will create the flashback archive on this tablespace.

create flashback archive fba1
tablespace tbs1
quota 10G
retention 1 year;

If we have multiple data archives we can set any one default.

alter flashback archive fba1 set default;

To change retention policy:

ALTER FLASHBACK ARCHIVE fba1 MODIFY RETENTION 2 YEAR;

ALTER FLASHBACK ARCHIVE fba1 MODIFY RETENTION 1 MONTH;

show user;
SYS

This is super user and has all the privileges.

To grant this to others:

grant flashback archive administer to scott;

select flashback_archive_name from dba_flashback_archive;

grant flashback archive on fba1 to scott;


We can assign a table to this flashback archive.

create table scott.t2 as select * from scott.t1;

alter table scott.t2 flashback archive fba1;

delete from scott.t2;

select * from scott.t2 as of timestamp
to_timestamp('2015-12-20 10:15:00','YYYY-MM-DD HH24:MI:SS');

If we find the required rows we can insert the same into our table.

insert into scott.t2 select * from scott.t2 as of timestamp
to_timestamp('2015-12-20 10:45:00','YYYY-MM-DD HH24:MI:SS');

We can unlink the table from flashback archive.

alter table scott.t1 no flashback archive;

desc dba_flashback_archive;

desc dba_flashback_archive_ts;

desc dba_flashback_archive_tables;


Flashback Table (undrop a table)
Flashback Query

Flashback Transaction


Flashback Query

select product_id, purchprice
from products
as of timestamp / scn
where product_id=100;

update products set salesprice =
(select salesprice from products
as of timestamp to_timestamp('2015-12-20 10:15:00','YYYY-MM-DD HH24:MI:SS')
where product_id=100)
where product_id=100;


Flashback Version Query

select product_id, purchprice
from products
versions between timestamp <t1> and <t2>
where product_id=100;

alter system set recyclebin = 'ON';
alter system set recyclebin = 'OFF';

by default this is on

show recyclebin;

show parameter recyclebin;

select flashback_on from v$database;

disconnect;

conn scott/tiger

create table t3 as select * from emp;

select count(*) from t3;

commit;

drop table t3;

select * from t3;
(table doesn't exist)

show recyclebin; (this works only in the same user where we dropped a table)

its here

flashback table t3 to before drop;

select count(*) from t3;

To empty the recyclebin:

purge recyclebin;

To use flashback table feature we must enable the row movement for that table:

select * from t3 order by empno;

Row movement is oracle's ability to restructure the rows.

select row_movement from user_tables
where table_name = 'T3';

alter table t3 enable row movement;

select row_movement from user_tables
where table_name = 'T3';

Here we can go to a particular time backwards.
We must know the good time.

select to_char(oldest_flashback_time, 'mm/dd/yy hh24:mi:ss')
from v$flashback_database_log;

Whatever time comes as the result of the above query, we can flash back up to that time.


Testing Scenario

select * from t3 where deptno = 10;

delete from t3 where deptno = 10;

There is a constraint problem, so we disable the constraint.

alter table dept disable constraint dept_mgr_fk;

flashback table t3 to timestamp
to_date('20-DEC-15 10:30:20','DD-MON-YY HH24:MI:SS');

select * from t3 where deptno=10;


Flashback Transaction Query

conn / as sysdba
alter database add supplemental log data (primary key) columns;

desc flashback_transaction_query;
desc emp;
select ename, sal from scott.emp where empno = 7900;

update emp set sal = sal * 2 where empno = 7900;

commit;

Now come to flashback:

select ename, sal, versions_xid from emp
versions between scn minvalue and maxvalue
where empno = 7900;

We can see the last 2 records in the result.

based on that 

execute dbms_flashback.transaction_backout(numtxns=>1,xids=>sys.xid_array('txnno'));

here 1 means only one transaction
txnno paste the actual transaction id

now select that row to see the old result

select ename from emp
where ename >= 'A' and ename < 'D';

delete from emp where ename like 'A%';

commit;

select ename from emp as of timestamp
to_timestamp('20-12-15 12:31:00','DD-MM-YY HH24:MI:SS');

Friday, December 18, 2015

Logminer

Its a pl/sql based utility.

Steps:

alter database add supplemental log data;

To confirm this change:
select name, SUPPLEMENTAL_LOG_DATA_MIN from v$database;


Create a directory to store dictionary file.

mkdir /u01/logs


Specify the location of dictionary file at OS level.

show parameter utl;

alter system set utl_file_dir = '/u01/logs' scope = spfile;
(we must change only in spfile)


Bounce back the database.

startup force;

show parameter utl;


Create a dictionary file.

exec dbms_logmnr_d.build ('dfile', '/u01/logs');


Connect to a sys user and specify the all logfiles to Logminer session.

exec dbms_logmnr.add_logfile ('/u01/prod/redo01.log');
exec dbms_logmnr.add_logfile ('/u01/prod/redo02.log');
exec dbms_logmnr.add_logfile ('/u01/prod/redo03.log');

Start the mining process.

exec dbms_logmnr.start_logmnr (dictfilename => '/u01/logs/dfile');

spool abc.sql
select sql_undo, sql_redo from v$logmnr_contents
where seg_owner = 'SCOTT'
and seg_name = 'DEPT';
spool off;

Thursday, December 17, 2015

Traditional Import and Export Utilities

Export and Import Modes

There are 4 modes:
1. Full
2. Tablespace
3. User
4. Table

Export Session in Full Database Mode

Parameter File Method

exp SYSTEM/manager PARFILE=params.dat

params.dat:

FILE=dba.dmp
GRANTS=y
FULL=y
ROWS=y

Command-Line Method

exp SYSTEM/manager FULL=y FILE=dba.dmp GRANTS=y ROWS=y


Export Session in User Mode

Parameter File Method

exp scott/tiger PARFILE=params.dat

params.dat:

FILE=scott.dmp
OWNER=scott
GRANTS=y
ROWS=y
COMPRESS=y

Command-Line Method

exp scott/tiger FILE=scott.dmp OWNER=scott GRANTS=y ROWS=y COMPRESS=y 


Export Sessions in Table Mode

exp SYSTEM/manager TABLES=(a,scott.b,c,mary.d)


DBA Exporting Tables for Two Users

Parameter File Method

exp SYSTEM/manager PARFILE=params.dat

params.dat:

FILE=expdat.dmp
TABLES=(scott.emp,u1.t2)
GRANTS=y
INDEXES=y

Command-Line Method

exp SYSTEM/manager FILE=expdat.dmp TABLES=scott.emp,u1.t2 GRANTS=y INDEXES=y


User Exports Tables That He Owns

Parameter File Method

exp scott/tiger PARFILE=params.dat

params.dat:

FILE=scott.dmp
TABLES=(dept,salgrade)
ROWS=y
COMPRESS=y

Command-Line Method

exp scott/tiger FILE=scott.dmp TABLES=dept,salgrade ROWS=y COMPRESS=y


Using Pattern Matching to Export Various Tables

Parameter File Method

exp SYSTEM/manager PARFILE=params.dat

params.dat:

FILE=misc.dmp
TABLES=(scott.%P%,u1.%,scott.%S%)

Command-Line Method

exp SYSTEM/manager FILE=misc.dmp TABLES=scott.%P%,u1.%,scott.%S%



Import Sessions

Import of Selected Tables for a Specific User

Parameter File Method

imp SYSTEM/manager PARFILE=params.dat

params.dat:

FILE=dba.dmp
SHOW=n
IGNORE=n
GRANTS=y
FROMUSER=scott
TABLES=(dept,emp)

Command-Line Method

imp SYSTEM/manager FILE=dba.dmp FROMUSER=scott TABLES=(dept,emp)


Import of Tables Exported by Another User

Parameter File Method

imp SYSTEM/manager PARFILE=params.dat

params.dat:

FILE=blake.dmp
SHOW=n
IGNORE=n
GRANTS=y
ROWS=y
FROMUSER=blake
TOUSER=scott
TABLES=(unit,manager)

Command-Line Method

imp SYSTEM/manager FROMUSER=blake TOUSER=scott FILE=blake.dmp TABLES=(unit,manager)


Import of Tables from One User to Another

Parameter File Method
imp SYSTEM/manager PARFILE=params.dat

params.dat:

FILE=scott.dmp
FROMUSER=scott
TOUSER=blake
TABLES=(*)

Command-Line Method
imp SYSTEM/manager FILE=scott.dmp FROMUSER=scott TOUSER=blake TABLES=(*)


Import Using Pattern Matching to Import Various Tables

Parameter File Method
imp SYSTEM/manager PARFILE=params.dat

params.dat:

FILE=scott.dmp
IGNORE=n
GRANTS=y
ROWS=y
FROMUSER=scott
TABLES=(%d%,b%s)

Command-Line Method
imp SYSTEM/manager FROMUSER=scott FILE=scott.dmp TABLES=(%d%,b%s)


Command-Line Entries

exp username/password PARAMETER=value
exp username/password PARAMETER=(value1, value2,...,valuen)

Parameter Files

exp PARFILE=filename
exp username/password PARFILE=filename

exp username/password PARFILE=params.dat INDEXES=n


Interactive Mode

exp username/password

exp scott/tiger FILE = dat1.dmp, dat2.dmp, dat3.dmp FILESIZE=2048

The following is an example of specifying an SCN. When the export is performed, the data will be consistent as of SCN 3482971.

exp system/manager FILE=exp.dmp FLASHBACK_SCN=3482971

exp system/manager FILE=exp.dmp FLASHBACK_TIME="TIMESTAMP '2002-05-01 11:00:00'"

exp system/manager FILE=exp.dmp FLASHBACK_TIME="TO_TIMESTAMP('12-02-2001 14:35:00', 'DD-MM-YYYY HH24:MI:SS')"

exp system/manager FILE=exp.dmp FLASHBACK_TIME="'2002-05-01 11:00:00'"

exp SYSTEM/manager LOG=export.log


Query

exp scott/tiger TABLES=emp QUERY=\"WHERE job=\'SALESMAN\' and sal \<1600\"

It will create a query at runtime like below:
SELECT * FROM emp WHERE job='SALESMAN' and sal <1600; 

Wednesday, December 16, 2015

SQL Loader

SQL*Loader loads data from external files into tables of an Oracle database.

Under scott user we have a table called DEPT.  We will try to load data from a sample text file to dept table.

Create a sample datafile at OS level.

dept.dat
50,dept50,delhi
60,dept60,hyd


Create a control file based on dept table and data file.

dept.ctl

LOAD DATA
INFILE '/home/oracle/dmp/dept.dat'
INTO TABLE dept
APPEND
FIELDS TERMINATED BY ","
(deptno,dname,loc)

Invoke the SQL*Loader

sqlldr userid=scott/tiger control=dept.ctl log=dept.log


Tuesday, December 15, 2015

RMAN Backup Scripts - Real-time, Useful

Simple backup script

$rman target sys/manager@toprod @offbak.rman

offbak.rman
run{
shutdown immediate;
startup mount;
allocate channel c1 type disk;
backup database format '/u01/bkp/fullbak.bus';
alter database open;
}

One more simple RMAN script

RMAN> RUN{
# backup the database to disk
ALLOCATE CHANNEL c1 TYPE DISK;
ALLOCATE CHANNEL c2 TYPE DISK;
ALLOCATE CHANNEL c3 TYPE DISK;
#backup the whole db
BACKUP
TAG whole_database_open
FORMAT '/u01/oradata/backups/db_%t_%s_p%p'
DATABASE;
# switch the current log file
SQL 'alter system archive log current';
#backup the archived logs
BACKUP
ARCHIVELOG ALL
FORMAT '/u11/oradata/backups/al_%t_%s_p%p';
# backup a copy of the control file
BACKUP
CURRENT CONTROLFILE
TAG = cf1
FORMAT '/u12/oradata/backups/cf_%t_%s_p%p';
RELEASE channel c1;
RELEASE channel c2;
RELEASE channel c3;
}


Real time backup script for a tape (with media manager)

create script weekly_full_tape{
ALLOCATE CHANNEL ch00 TYPE 'SBT_TAPE';
ALLOCATE CHANNEL ch01 TYPE 'SBT_TAPE';
SEND 'NB_ORA_CLIENT=175.16.34.25,NB_ORA_POLICY=DMS_OraDB_rman';
BACKUP
INCREMENTAL Level=0
FORMAT 'db_df_%U_%t'
DATABASE;
RELEASE CHANNEL ch00;
RELEASE CHANNEL ch01;
ALLOCATE CHANNEL ch00 TYPE 'SBT_TAPE';
SEND 'NB_ORA_CLIENT=175.16.34.25,NB_ORA_POLICY=DMS_OraDB_rman';
BACKUP
FORMAT 'db_arc_%U_%t'
ARCHIVELOG ALL;
RELEASE CHANNEL ch00;
ALLOCATE CHANNEL ch00 TYPE 'SBT_TAPE';
SEND 'NB_ORA_CLIENT=175.16.34.25,NB_ORA_POLICY=DMS_OraDB_rman';
BACKUP
FORMAT 'db_cf_%U_%t'
CURRENT CONTROLFILE;
RELEASE CHANNEL ch00;}


Real time daily backup script incremental

create script daily_inc_disk{
allocate channel c1 device type disk format '/home/oracle/bkp/db_df_%U_%t';
allocate channel c2 device type disk format '/home/oracle/bkp/db_df_%U_%t';
allocate channel c3 device type disk format '/home/oracle/bkp/db_df_%U_%t';
backup incremental level 1 database;
backup archivelog all format '/home/oracle/bkp/arc_%d_%u_%s_%T' not backed up 2 times;
delete noprompt archivelog UNTIL TIME 'sysdate-6' backed up 1 times to device type disk;
release channel c1;
release channel c2;
release channel c3;}


Real time backup script (Weekly, Full, Incremental)

create script weekly_full_disk{
allocate channel c1 device type disk format '/home/oracle/bkp/db_df_%U_%t';
allocate channel c2 device type disk format '/home/oracle/bkp/db_df_%U_%t';
allocate channel c3 device type disk format '/home/oracle/bkp/db_df_%U_%t';
backup incremental level 0 database;
backup archivelog all format '/home/oracle/bkp/arc_%d_%u_%s_%T' not backed up 2 times;
delete noprompt archivelog UNTIL TIME 'sysdate-6' backed up 1 times to device type disk;
release channel c1;
release channel c2;
release channel c3;}

Monday, December 14, 2015

Data Guard

Primary Database
A Data Guard configuration contains one production database, also referred to as the primary database, that functions in the primary role.


Standby Database

A standby database is a transactionally consistent copy of the primary database.


There are 3 types of standby databases possible in Data Guard Configuration:

1. Physical standby database
2. Logical standby database
3. Snapshot Standby database

Physical standby database:
Provides a physically identical copy of the primary database, with on disk
database structures that are identical to the primary database on a block-for-
block basis. The database schema, including indexes, are the same. A physical standby database is kept synchronized with the primary database, through Redo Apply, which recovers the redo data received from the primary database and applies the redo to the physical standby database.



Logical standby database:
Contains the same logical information as the production database, although the physical organization and structure of the data can be different. The logical standby database is kept synchronized with the primary database through SQL Apply, which transforms the data in the redo received from the primary database into SQL statements and then executes the SQL statements on the standby database.



Snapshot Standby Database:
A snapshot standby database is a fully updatable standby database.

Like a physical or logical standby database, a snapshot standby database receives and archives redo data from a primary database. Unlike a physical or logical standby database, a snapshot standby database does not apply the redo data that it receives. The redo data received by a snapshot standby database is not applied until the snapshot standby is converted back into a physical standby database, after first discarding any local updates made to the snapshot standby database.

Data Guard Services

1. Redo Transport Services

2. Apply Services
3. Role Transitions

A physical standby database Data Guard Protection Modes
1. Maximum availability
2. Maximum performance (default)
3. Maximum protection


Actual Steps of configuring a data guard in max performance mode

steps
===

keep 2 terminals open for primary and standby

primary database = prod
standby database = standby

on primary

export ORACLE_SID=prod
sqlplus / as sysdba
startup;

archive log list;
Make sure the primary is in archivelog mode, as we are going to use RMAN to configure the Data Guard setup; RMAN online backups work only in archivelog mode.

show parameter spfile;
(to check the system is working with spfile or pfile)

create pfile from spfile;
(will create initprod.ora which can be edited)

shut immediate;

exit;

cd $ORACLE_HOME/dbs
rm spfileprod.ora
vi initprod.ora

add the following parameters:
*.db_unique_name='hyd'
*.instance_name='prod'
*.log_archive_dest_1='location=/u01/prod/arch'
*.log_archive_dest_2='service=topune'
*.standby_file_management=auto
:wq

cp initprod.ora initstandby.ora

create the necessary directory structure for standby database
cd (to go to home directory)
mkdir -p /u01/standby/arch
mkdir -p /u01/app/oracle/admin/standby/adump

cd $ORACLE_HOME/dbs
vi initstandby.ora
replace all "prod" with "standby"
:%s/prod/standby/g

change the following parameters:
db_name='prod' (must be same across all the servers)
db_unique_name='pune'
instance_name='standby'
remove log_archive_dest_2
db_file_name_convert='/u01/prod','/u01/standby'
log_file_name_convert='/u01/prod','/u01/standby'
:wq

on primary

export ORACLE_SID=prod
sqlplus / as sysdba
startup mount;
ALTER DATABASE FORCE LOGGING;
(As dataguard concept works based on redo log, force logging is must)
exit;

cd $ORACLE_HOME/dbs
create password file for both primary and standby

orapwd file=orapwprod password=manager force=y ignorecase=y
orapwd file=orapwstandby password=manager force=y ignorecase=y

sqlplus / as sysdba
select status from v$instance;
(this is primary and must be in mount stage)
exit;

on standby

export ORACLE_SID=standby
sqlplus / as sysdba
startup nomount;

Configure a listener for standby database and a tns entry "topune" for the same.
As this standby will be in pune city so the connection which goes to standby listener will be called as "topune".

But the database name must remain "prod".  While creating the TNS entry, the service name must also be prod.

Go to Linux terminal
$netmgr
create a listener list_standby for standby database
create a connect string tns service name "topune"

$lsnrctl start list_standby
$tnsping topune

on primary

$rman target / nocatalog auxiliary sys/manager@topune

RMAN> duplicate target database for standby from active database;

exit;

sqlplus / as sysdba

select name, open_mode,database_role,protection_mode from v$database;

col db_unique_name format a15

select db_unique_name, open_mode,database_role,protection_mode from v$database;

alter database open;

on standby

shut immediate;

startup nomount;
alter database mount standby database;
alter database open read only;

alter database recover managed standby database disconnect;
alter database recover managed standby database disconnect from session;

select name, open_mode,database_role,protection_mode from v$database;

select db_unique_name, open_mode,database_role,protection_mode from v$database;

on primary

go to scott user and make some txn
sqlplus / as sysdba
conn scott/tiger
create table t1 as select * from emp;
insert into t1 select * from t1;
/
/
commit;

conn / as sysdba
alter system switch logfile;
archive log list;
note down the latest number

check the same archive log list at standby
Both must be the same. Once we issue the "disconnect" command, the standby starts receiving archive logs from the primary and applies them locally. After a couple of minutes, depending on the number of archive files, both will be the same.

If any files were not shipped to the standby, copy them manually and register them with the command below. Once we register a few, the others will start applying themselves.
SQL> alter database register logfile '/var/arch/arch_1_101.arc';
If we have too many files we can't do this manually. In that case we can use the RMAN command below:
rman> catalog start with '/var/arch';

Verify the Physical Standby Database Is Performing Properly

Step 1 Identify the existing archived redo log files.

select sequence#,first_time,next_time from v$archived_log order by 1;

Step 2 Force a log switch to archive the current online redo log file.

SQL> ALTER SYSTEM SWITCH LOGFILE;

Step 3 Verify the new redo data was archived on the standby database.

select sequence#,first_time,next_time from v$archived_log order by 1;

Step 4 Verify that received redo has been applied.

SQL> SELECT SEQUENCE#,APPLIED FROM V$ARCHIVED_LOG ORDER BY 1;

Note: The value of the APPLIED column for the most recently
received log file will be either IN-MEMORY or YES if that log file has been
applied.

Once the apply is up to date we can keep the standby in read-only mode.

alter database recover managed standby database cancel;



Converting from Max Performance Mode to Max Availability Mode

shut both primary and standby

at primary

cd $ORACLE_HOME/dbs
vi initprod.ora
log_archive_dest_2='service=topune lgwr'
:wq

sqlplus / as sysdba

startup mount

alter database set standby database to maximize availability;

alter database open;

select name,open_mode,database_role, protection_mode from v$database;

With this primary is ready with max availability mode.  At standby we need to create 3 redo log files so that primary can write directly into standby logs.

At Standby

sqlplus / as sysdba
startup nomount;
alter database mount standby database;
alter database open read only;

select name,open_mode,database_role, protection_mode from v$database;

select member from v$logfile;

alter database add standby logfile group 4 '/home/oracle/standby/redo04.log'
size 50m;

alter database add standby logfile group 5 '/home/oracle/standby/redo05.log'
size 50m;

alter database add standby logfile group 6 '/home/oracle/standby/redo06.log'
size 50m;



select member from v$logfile;



select group#,status from v$managed_standby;


select group#,status from v$standby_log;

select name,open_mode,database_role, protection_mode from v$database;

shut immediate;

startup nomount;

alter database mount standby database;

alter database open read only;

select name,open_mode,database_role, protection_mode from v$database;

alter database recover managed standby database disconnect using current logfile;

alter database recover managed standby database cancel;


Converting from Max Availability Mode to Max Protection

at primary

shut immediate
exit

cd $ORACLE_HOME/dbs

vi initprod.ora

log_archive_dest_2='service=topune lgwr sync affirm'
:wq

At standby

sqlplus / as sysdba
startup

At Primary

sqlplus / as sysdba

startup mount

alter database set standby database to maximize protection;

alter database open;

select name,open_mode,database_role,protection_mode from v$database;

at standby

select name,open_mode,database_role,protection_mode from v$database;

should be same

shut immediate
startup

alter database recover managed standby database disconnect using current logfile;

alter database recover managed standby database cancel;





Snapshot Standby Database

FRA must be enabled at standby, to configure Snapshot Standby Database.

show parameter recover

shut immediate
exit

startup mount

alter database convert to snapshot standby;

alter database open;

select name,open_mode,database_role,protection_mode from v$database;

now open mode will be read write

make some dml, ddl for testing

shut immediate

startup mount

alter database convert to physical standby;

alter database open;

All the DDL and DML done previously will be lost, and the standby will return to the position where we converted it to a snapshot standby database.