Create a Duplicate (Auxiliary) Oracle Database on the Same Host with RMAN
The orcl database is up and running.
Main database name = orcl
Auxiliary database name = dupli
export ORACLE_SID=dupli
First create the password file. Go to the following location:
cd $ORACLE_HOME/dbs/
and create the password file:
orapwd file=orapwdupli password=manager entries=5 force=y
Then create the listener.ora file. The location of the file is:
cd $ORACLE_HOME/network/admin/
LISTENER =
(DESCRIPTION_LIST =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.2.251)(PORT = 1521))
)
)
SID_LIST_LISTENER =
(SID_LIST =
(SID_DESC =
(GLOBAL_DBNAME = orcl)
(ORACLE_HOME = /opt/app/oracle/product/10.2/db)
(SID_NAME = orcl)
)
(SID_DESC =
(GLOBAL_DBNAME = dupli)
(ORACLE_HOME = /opt/app/oracle/product/10.2/db)
(SID_NAME = dupli)
)
)
Then create the tnsnames.ora file. The location of the file is:
cd $ORACLE_HOME/network/admin/
orcl =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.2.251)(PORT = 1521))
)
(CONNECT_DATA =
(SERVICE_NAME = orcl)
)
)
dupli =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.2.251)(PORT = 1521))
)
(CONNECT_DATA =
(SERVICE_NAME = dupli)
)
)
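Before going further, reload the listener so it picks up the new static SID_LIST entries and confirm that both aliases resolve; a quick check, assuming the default listener name LISTENER as in the file above:
lsnrctl reload LISTENER      # re-read listener.ora
lsnrctl status LISTENER      # orcl and dupli should show up as statically registered services
tnsping orcl                 # both aliases should resolve through tnsnames.ora
tnsping dupli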
Create the init.ora file for the dupli instance, and create the directories for the duplicate database as given in the init.ora file (a directory-creation sketch follows the file listing below).
====
[apt-amd-02:oracle:dupli]$ cat init_dupli.txt
dupli.__db_cache_size=297795584
dupli.__java_pool_size=4194304
dupli.__large_pool_size=4194304
dupli.__shared_pool_size=130023424
dupli.__streams_pool_size=0
*.audit_file_dest='/opt/app/oracle/admin/dupli/adump'
*.background_dump_dest='/opt/app/oracle/admin/dupli/bdump'
*.compatible='10.2.0.1.0'
*.control_files='/opt/app/oracle/oradata/dupli/control01.ctl','/opt/app/oracle/oradata/dupli/control02.ctl','/opt/app/oracle/oradata/dupli/control03.ctl'
*.core_dump_dest='/opt/app/oracle/admin/dupli/cdump'
*.db_block_size=8192
*.db_domain=''
*.db_file_multiblock_read_count=16
*.db_name='dupli'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=orclXDB)'
*.job_queue_processes=10
*.nls_language='ENGLISH'
*.nls_territory='UNITED KINGDOM'
*.open_cursors=300
*.pga_aggregate_target=146800640
*.processes=150
*.remote_login_passwordfile='EXCLUSIVE'
*.sga_target=440401920
*.undo_management='AUTO'
*.undo_tablespace='UNDOTBS1'
*.user_dump_dest='/opt/app/oracle/admin/dupli/udump'
*.STANDBY_FILE_MANAGEMENT=AUTO
*.remote_login_passwordfile=EXCLUSIVE
*.db_file_name_convert='/opt/app/oracle/oradata/orcl/orcl' '/opt/app/oracle/oradata/dupli'
*.log_file_name_convert='/opt/app/oracle/oradata/orcl/orcl' '/opt/app/oracle/oradata/dupli'
*.instance_name='dupli'
*.DB_UNIQUE_NAME='dupli'
*.remote_login_passwordfile=EXCLUSIVE
================================
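As noted above, the directories referenced in init_dupli.txt must exist before the instance is started. A minimal shell sketch, using the paths from the file above:
mkdir -p /opt/app/oracle/admin/dupli/adump
mkdir -p /opt/app/oracle/admin/dupli/bdump
mkdir -p /opt/app/oracle/admin/dupli/cdump
mkdir -p /opt/app/oracle/admin/dupli/udump
mkdir -p /opt/app/oracle/oradata/dupli        # control files, datafiles and redo logs for dupli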
The init.ora file for the main database:
[apt-amd-02:oracle:orcl]$ cat init_orcl.txt
orcl.__db_cache_size=297795584
orcl.__java_pool_size=4194304
orcl.__large_pool_size=4194304
orcl.__shared_pool_size=130023424
orcl.__streams_pool_size=0
*.audit_file_dest='/opt/app/oracle/admin/orcl/adump'
*.background_dump_dest='/opt/app/oracle/admin/orcl/bdump'
*.compatible='10.2.0.1.0'
*.control_files='/opt/app/oracle/oradata/orcl/orcl/control01.ctl','/opt/app/oracle/oradata/orcl/orcl/control02.ctl','/opt/app/oracle/oradata/orcl/orcl/control03.ctl'
*.core_dump_dest='/opt/app/oracle/admin/orcl/cdump'
*.db_block_size=8192
*.db_domain=''
*.db_file_multiblock_read_count=16
*.db_name='orcl'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=orclXDB)'
*.job_queue_processes=10
*.nls_language='ENGLISH'
*.nls_territory='UNITED KINGDOM'
*.open_cursors=300
*.pga_aggregate_target=146800640
*.processes=150
*.remote_login_passwordfile='EXCLUSIVE'
*.sga_target=440401920
*.undo_management='AUTO'
*.undo_tablespace='UNDOTBS1'
*.user_dump_dest='/opt/app/oracle/admin/orcl/udump'
*.log_archive_dest_1='LOCATION=/opt/app/oracle/archive/ MANDATORY'
*.log_archive_dest_2='service=orcl lgwr'
*.FAL_SERVER='standby'
*.FAL_CLIENT='orcl'
*.STANDBY_FILE_MANAGEMENT=AUTO
*.remote_login_passwordfile=EXCLUSIVE
======
Take a backup of the main database through RMAN; the script is given below.
[apt-amd-02:oracle:orcl]$ cat rman.sh
rman nocatalog target / <<-EOF
configure retention policy to recovery window of 3 days;
configure backup optimization on;
configure controlfile autobackup on;
configure default device type to disk;
configure device type disk parallelism 1 backup type to compressed backupset;
configure datafile backup copies for device type disk to 1;
configure maxsetsize to unlimited;
configure snapshot controlfile name to '/opt/app/oracle/oradata/rmanback/snapshot_controlfile';
show all;
run {
allocate channel ch1 type Disk maxpiecesize = 1900M;
backup full database noexclude
include current controlfile
format '/opt/app/oracle/oradata/rmanback/datafile_%s_%p.bak'
tag 'datafile_daily';
}
run {
allocate channel ch1 type Disk maxpiecesize = 1900M;
backup archivelog all
delete all input
format '/opt/app/oracle/oradata/rmanback/archivelog_%s_%p.bak'
tag 'archivelog_daily';
}
run {
allocate channel ch1 type Disk maxpiecesize = 1900M;
backup format '/opt/app/oracle/oradata/rmanback/controlfile_%s.bak' current controlfile;
}
crosscheck backup;
list backup of database;
report unrecoverable;
report schema;
report need backup;
report obsolete;
delete noprompt expired backup of database;
delete noprompt expired backup of controlfile;
delete noprompt expired backup of archivelog all;
delete noprompt obsolete recovery window of 3 days;
quit
EOF
Run the RMAN backup script. The backup will be stored in /opt/app/oracle/oradata/rmanback/.
[apt-amd-02:oracle:orcl]$ sh rman.sh
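Before duplicating, it does no harm to confirm that the backup pieces are present and valid; a quick RMAN check (a sketch, not part of the original script):
rman target / <<EOF
list backup summary;
crosscheck backup;
report schema;
EOF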
========
Now start the duplicate (auxiliary) instance.
[apt-amd-02:oracle:dupli]$
Duplicate the Database
Now you are ready to duplicate the database orcl to dupli.
export ORACLE_SID=dupli
sqlplus sys/manager as sysdba
startup force nomount pfile='/home/oracle/init_dupli.txt';
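At this point the auxiliary instance is only started in NOMOUNT; a quick sanity check from the same dupli session:
-- STARTED means the instance is up with no control file mounted
select status, instance_name from v$instance;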
=====
export ORACLE_SID=orcl
[apt-amd-02:oracle:ORCL]$
sqlplus / as sysdba
Run the following queries to identify the datafiles and redo log files:
set feed off
set pagesize 10000
column name format a40 heading "Datafile"
column file# format 99 heading "File-ID"
column member format a40 heading "Logfile"
column group# format 99 heading "Group-Nr"
select name, file# from v$dbfile;
select member, group# from v$logfile;
/opt/app/oracle/oradata/orcl/orcl/redo01.log 1
/opt/app/oracle/oradata/orcl/orcl/redo02.log 2
/opt/app/oracle/oradata/orcl/orcl/redo03.log 3
/opt/app/oracle/oradata/orcl/orcl/system01.dbf 1
/opt/app/oracle/oradata/orcl/orcl/undotbs01.dbf 2
/opt/app/oracle/oradata/orcl/orcl/sysaux01.dbf 3
/opt/app/oracle/oradata/orcl/orcl/users01.dbf 4
Then create the following RMAN script:
RUN
{
SET NEWNAME FOR DATAFILE 1 TO '/opt/app/oracle/oradata/dupli/dupli_sys1.dbf';
SET NEWNAME FOR DATAFILE 2 TO '/opt/app/oracle/oradata/dupli/dupli_undo1.dbf';
SET NEWNAME FOR DATAFILE 3 TO '/opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf';
SET NEWNAME FOR DATAFILE 4 TO '/opt/app/oracle/oradata/dupli/dupli_users1.dbf';
DUPLICATE TARGET DATABASE TO dupli
PFILE = '/home/oracle/init_dupli.txt'
NOFILENAMECHECK
LOGFILE GROUP 1 ('/opt/app/oracle/oradata/dupli/dupli_log1A.rdo') SIZE 50M REUSE ,
GROUP 2 ('/opt/app/oracle/oradata/dupli/dupli_log2A.rdo') SIZE 50M REUSE,
GROUP 3 ('/opt/app/oracle/oradata/dupli/dupli_log3A.rdo') SIZE 50M REUSE ; }
===========
Now connect to RMAN:
[apt-amd-02:oracle:orcl]$ rman TARGET / AUXILIARY sys/manager@dupli
Recovery Manager: Release 10.2.0.1.0 - Production on Thu Oct 15 14:13:24 2009
Copyright (c) 1982, 2005, Oracle. All rights reserved.
connected to target database: ORCL (DBID=1226728274)
connected to auxiliary database: DUPLI (not mounted)
Run this script:
RMAN> RUN
{
SET NEWNAME FOR DATAFILE 1 TO '/opt/app/oracle/oradata/dupli/dupli_sys1.dbf';
SET NEWNAME FOR DATAFILE 2 TO '/opt/app/oracle/oradata/dupli/dupli_undo1.dbf';
SET NEWNAME FOR DATAFILE 3 TO '/opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf';
SET NEWNAME FOR DATAFILE 4 TO '/opt/app/oracle/oradata/dupli/dupli_users1.dbf';
DUPLICATE TARGET DATABASE TO dupli
PFILE = '/home/oracle/init_dupli.txt'
NOFILENAMECHECK
LOGFILE GROUP 1 ('/opt/app/oracle/oradata/dupli/dupli_log1A.rdo') SIZE 50M REUSE ,
GROUP 2 ('/opt/app/oracle/oradata/dupli/dupli_log2A.rdo') SIZE 50M REUSE,
GROUP 3 ('/opt/app/oracle/oradata/dupli/dupli_log3A.rdo') SIZE 50M REUSE ; }2> 3> 4> 5> 6> 7> 8> 9> 10> 11> 12>
executing command: SET NEWNAME
using target database control file instead of recovery catalog
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
Starting Duplicate Db at 15-OCT-09
allocated channel: ORA_AUX_DISK_1
channel ORA_AUX_DISK_1: sid=155 devtype=DISK
contents of Memory Script:
{
set until scn 871634;
set newname for datafile 1 to
"/opt/app/oracle/oradata/dupli/dupli_sys1.dbf";
set newname for datafile 2 to
"/opt/app/oracle/oradata/dupli/dupli_undo1.dbf";
set newname for datafile 3 to
"/opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf";
set newname for datafile 4 to
"/opt/app/oracle/oradata/dupli/dupli_users1.dbf";
restore
check readonly
clone database
;
}
executing Memory Script
executing command: SET until clause
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
Starting restore at 15-OCT-09
using channel ORA_AUX_DISK_1
channel ORA_AUX_DISK_1: starting datafile backupset restore
channel ORA_AUX_DISK_1: specifying datafile(s) to restore from backup set
restoring datafile 00001 to /opt/app/oracle/oradata/dupli/dupli_sys1.dbf
restoring datafile 00002 to /opt/app/oracle/oradata/dupli/dupli_undo1.dbf
restoring datafile 00003 to /opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf
restoring datafile 00004 to /opt/app/oracle/oradata/dupli/dupli_users1.dbf
channel ORA_AUX_DISK_1: reading from backup piece /opt/app/oracle/oradata/rmanback/datafile_1_1.bak
channel ORA_AUX_DISK_1: restored backup piece 1
piece handle=/opt/app/oracle/oradata/rmanback/datafile_1_1.bak tag=DATAFILE_DAILY
channel ORA_AUX_DISK_1: restore complete, elapsed time: 00:01:07
Finished restore at 15-OCT-09
sql statement: CREATE CONTROLFILE REUSE SET DATABASE "DUPLI" RESETLOGS ARCHIVELOG
MAXLOGFILES 16
MAXLOGMEMBERS 3
MAXDATAFILES 100
MAXINSTANCES 8
MAXLOGHISTORY 292
LOGFILE
GROUP 1 ( '/opt/app/oracle/oradata/dupli/dupli_log1A.rdo' ) SIZE 50 M REUSE,
GROUP 2 ( '/opt/app/oracle/oradata/dupli/dupli_log2A.rdo' ) SIZE 50 M REUSE,
GROUP 3 ( '/opt/app/oracle/oradata/dupli/dupli_log3A.rdo' ) SIZE 50 M REUSE
DATAFILE
'/opt/app/oracle/oradata/dupli/dupli_sys1.dbf'
CHARACTER SET WE8ISO8859P1
contents of Memory Script:
{
switch clone datafile all;
}
executing Memory Script
released channel: ORA_AUX_DISK_1
datafile 2 switched to datafile copy
input datafile copy recid=1 stamp=700323282 filename=/opt/app/oracle/oradata/dupli/dupli_undo1.dbf
datafile 3 switched to datafile copy
input datafile copy recid=2 stamp=700323282 filename=/opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf
datafile 4 switched to datafile copy
input datafile copy recid=3 stamp=700323282 filename=/opt/app/oracle/oradata/dupli/dupli_users1.dbf
contents of Memory Script:
{
set until scn 871634;
recover
clone database
delete archivelog
;
}
executing Memory Script
executing command: SET until clause
Starting recover at 15-OCT-09
allocated channel: ORA_AUX_DISK_1
channel ORA_AUX_DISK_1: sid=155 devtype=DISK
starting media recovery
archive log thread 1 sequence 43 is already on disk as file /opt/app/oracle/archive/1_43_698947538.dbf
archive log filename=/opt/app/oracle/archive/1_43_698947538.dbf thread=1 sequence=43
media recovery complete, elapsed time: 00:00:03
Finished recover at 15-OCT-09
contents of Memory Script:
{
shutdown clone;
startup clone nomount pfile= '/home/oracle/init_dupli.txt';
}
executing Memory Script
database dismounted
Oracle instance shut down
connected to auxiliary database (not started)
Oracle instance started
Total System Global Area 440401920 bytes
Fixed Size 2021408 bytes
Variable Size 130025440 bytes
Database Buffers 306184192 bytes
Redo Buffers 2170880 bytes
sql statement: CREATE CONTROLFILE REUSE SET DATABASE "DUPLI" RESETLOGS ARCHIVELOG
MAXLOGFILES 16
MAXLOGMEMBERS 3
MAXDATAFILES 100
MAXINSTANCES 8
MAXLOGHISTORY 292
LOGFILE
GROUP 1 ( '/opt/app/oracle/oradata/dupli/dupli_log1A.rdo' ) SIZE 50 M REUSE,
GROUP 2 ( '/opt/app/oracle/oradata/dupli/dupli_log2A.rdo' ) SIZE 50 M REUSE,
GROUP 3 ( '/opt/app/oracle/oradata/dupli/dupli_log3A.rdo' ) SIZE 50 M REUSE
DATAFILE
'/opt/app/oracle/oradata/dupli/dupli_sys1.dbf'
CHARACTER SET WE8ISO8859P1
contents of Memory Script:
{
set newname for tempfile 1 to
"/opt/app/oracle/oradata/dupli/temp01.dbf";
switch clone tempfile all;
catalog clone datafilecopy "/opt/app/oracle/oradata/dupli/dupli_undo1.dbf";
catalog clone datafilecopy "/opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf";
catalog clone datafilecopy "/opt/app/oracle/oradata/dupli/dupli_users1.dbf";
switch clone datafile all;
}
executing Memory Script
executing command: SET NEWNAME
renamed temporary file 1 to /opt/app/oracle/oradata/dupli/temp01.dbf in control file
cataloged datafile copy
datafile copy filename=/opt/app/oracle/oradata/dupli/dupli_undo1.dbf recid=1 stamp=700325657
cataloged datafile copy
datafile copy filename=/opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf recid=2 stamp=700325658
cataloged datafile copy
datafile copy filename=/opt/app/oracle/oradata/dupli/dupli_users1.dbf recid=3 stamp=700325658
datafile 2 switched to datafile copy
input datafile copy recid=1 stamp=700325657 filename=/opt/app/oracle/oradata/dupli/dupli_undo1.dbf
datafile 3 switched to datafile copy
input datafile copy recid=2 stamp=700325658 filename=/opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf
datafile 4 switched to datafile copy
input datafile copy recid=3 stamp=700325658 filename=/opt/app/oracle/oradata/dupli/dupli_users1.dbf
contents of Memory Script:
{
Alter clone database open resetlogs;
}
executing Memory Script
database opened
Finished Duplicate Db at 15-OCT-09
===============
The duplicate database is now ready, created with the RESETLOGS option.
SQL> !echo $ORACLE_HOME
/opt/app/oracle/product/10.2/db
SQL> !echo $ORACLE_SID
dupli
SQL> select NAME from v$database;
NAME
---------
DUPLI
SQL> select FILE_NAME from dba_data_files ;
FILE_NAME
--------------------------------------------------------------------------------
/opt/app/oracle/oradata/dupli/dupli_sys1.dbf
/opt/app/oracle/oradata/dupli/dupli_undo1.dbf
/opt/app/oracle/oradata/dupli/dupli_sysaux1.dbf
/opt/app/oracle/oradata/dupli/dupli_users1.dbf
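The online redo logs should likewise show the names given in the LOGFILE clause of the DUPLICATE command; a quick check on the dupli instance:
SQL> select group#, member from v$logfile order by group#;
SQL> select open_mode from v$database;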
You can now comment out the DB_FILE_NAME_CONVERT and LOG_FILE_NAME_CONVERT parameters in the init_dupli.txt file and restart the database.
Friday, 16 October 2009
Creating a physical standby database on the same host
Oracle provides two types of standby databases:
1. Physical Standby Database
Archived redo logs transferred from the primary database are applied directly to the standby database.
2. Logical Standby Database
Redo from the primary is converted into SQL statements, and these statements are then applied to the standby database.
The key advantage of logical standby databases is that they are opened read/write, even while redo is being applied.
It is important to identify unsupported database objects on the primary database before you create a logical standby database,
because changes made to unsupported datatypes, tables, sequences, or views on the primary database will not be propagated to
the logical standby database. Moreover, no error message will be returned.
SELECT DISTINCT OWNER,TABLE_NAME FROM DBA_LOGSTDBY_UNSUPPORTED ORDER BY OWNER,TABLE_NAME;
SELECT COLUMN_NAME,DATA_TYPE FROM DBA_LOGSTDBY_UNSUPPORTED WHERE OWNER='XXX' AND TABLE_NAME = 'XXXXX';
==================
We are creating a physical standby database on the same host.
The orcl instance is up and running in archivelog mode.
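A quick way to confirm archivelog mode on orcl before continuing:
SQL> archive log list
SQL> select log_mode from v$database;
The query should return ARCHIVELOG.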
The init.ora file for the orcl instance:
=================================
[apt-amd-02:oracle:orcl]$ cat init_orcl.txt
orcl.__db_cache_size=297795584
orcl.__java_pool_size=4194304
orcl.__large_pool_size=4194304
orcl.__shared_pool_size=130023424
orcl.__streams_pool_size=0
*.audit_file_dest='/opt/app/oracle/admin/orcl/adump'
*.background_dump_dest='/opt/app/oracle/admin/orcl/bdump'
*.compatible='10.2.0.1.0'
*.control_files='/opt/app/oracle/oradata/orcl/orcl/control01.ctl','/opt/app/oracle/oradata/orcl/orcl/control02.ctl','/opt/app/oracle/oradata/orcl/orcl/control03.ctl'
*.core_dump_dest='/opt/app/oracle/admin/orcl/cdump'
*.db_block_size=8192
*.db_domain=''
*.db_file_multiblock_read_count=16
*.db_name='orcl'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=orclXDB)'
*.job_queue_processes=10
*.nls_language='ENGLISH'
*.nls_territory='UNITED KINGDOM'
*.open_cursors=300
*.pga_aggregate_target=146800640
*.processes=150
*.remote_login_passwordfile='EXCLUSIVE'
*.sga_target=440401920
*.undo_management='AUTO'
*.undo_tablespace='UNDOTBS1'
*.user_dump_dest='/opt/app/oracle/admin/orcl/udump'
*.log_archive_dest_1='LOCATION=/opt/app/oracle/archive/ MANDATORY'
*.log_archive_dest_2='service=orcl lgwr'
*.FAL_SERVER='standby'
*.FAL_CLIENT='orcl'
*.STANDBY_FILE_MANAGEMENT=AUTO
*.remote_login_passwordfile=EXCLUSIVE
========================================
*.log_archive_dest_2='service=orcl lgwr'
Create the following service entries in the tnsnames.ora file. Note that the orcl alias used by log_archive_dest_2 points to the stdby service, so redo is shipped to the standby on the same host:
orcl =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.2.251)(PORT = 1521))
)
(CONNECT_DATA =
(SERVICE_NAME = stdby)
)
)
*.FAL_SERVER='standby'
standby =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.2.251)(PORT = 1521))
)
(CONNECT_DATA =
(SERVICE_NAME = stdby)
)
)
====================================
Create the init.ora file for the standby database, and create the following directories as mentioned in the init.ora file (a directory-creation sketch follows the file listing below):
audit_file_dest
background_dump_dest
control_files
core_dump_dest
user_dump_dest
log_archive_dest_1
[apt-amd-02:oracle:stdby]$ cat init_stdby.txt
orcl.__db_cache_size=377487360
orcl.__java_pool_size=4194304
orcl.__large_pool_size=4194304
orcl.__shared_pool_size=146800640
orcl.__streams_pool_size=0
*.audit_file_dest='/opt/app/oracle/admin/stdby/adump'
*.background_dump_dest='/opt/app/oracle/admin/stdby/bdump'
*.compatible='10.2.0.1.0'
*.control_files='/opt/app/oracle/oradata/stdby/control_stdby.ctl'
*.core_dump_dest='/opt/app/oracle/admin/stdby/cdump'
*.db_block_size=8192
*.db_domain=''
*.db_file_multiblock_read_count=16
*.db_name='orcl'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=orclXDB)'
*.job_queue_processes=10
*.nls_language='ENGLISH'
*.nls_territory='UNITED KINGDOM'
*.open_cursors=300
*.pga_aggregate_target=199229440
*.processes=150
*.remote_login_passwordfile='EXCLUSIVE'
*.sga_target=536870912
*.undo_management='AUTO'
*.undo_tablespace='UNDOTBS1'
*.user_dump_dest='/opt/app/oracle/admin/stdby/udump'
*.log_archive_dest_1='LOCATION=/opt/app/oracle/archive MANDATORY'
*.db_file_name_convert='/opt/app/oracle/oradata/orcl/orcl' '/opt/app/oracle/oradata/stdby'
*.log_file_name_convert='/opt/app/oracle/oradata/orcl/orcl' '/opt/app/oracle/oradata/stdby'
*.fal_client='standby'
*.fal_server=''
*.instance_name='stdby'
*.DB_UNIQUE_NAME=stdby
*.remote_login_passwordfile=EXCLUSIVE
==================================
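As noted above, the directories referenced in init_stdby.txt must exist before the standby instance is started. A minimal shell sketch, using the paths from the file above:
mkdir -p /opt/app/oracle/admin/stdby/adump
mkdir -p /opt/app/oracle/admin/stdby/bdump
mkdir -p /opt/app/oracle/admin/stdby/cdump
mkdir -p /opt/app/oracle/admin/stdby/udump
mkdir -p /opt/app/oracle/oradata/stdby       # standby control file, datafiles and redo logs
mkdir -p /opt/app/oracle/archive             # log_archive_dest_1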
cd /opt/app/oracle/product/10.2/db/dbs
then create a password file for each instance:
orapwd file=orapworcl password=orcl entries=5 force=y
orapwd file=orapwstdby password=stdby entries=5 force=y
SQL> startup mount pfile='/home/oracle/init_orcl.txt' ;
ORACLE instance started.
Total System Global Area 440401920 bytes
Fixed Size 2021408 bytes
Variable Size 138414048 bytes
Database Buffers 297795584 bytes
Redo Buffers 2170880 bytes
Database mounted.
SQL> alter database create standby controlfile as '/opt/app/oracle/oradata/stdby/control_stdby.ctl';
Shut down the main database (orcl) and copy all of its files to the standby location:
/opt/app/oracle/oradata/orcl> cp *.* /opt/app/oracle/oradata/stdby/
Now it is time to start the standby database.
export ORACLE_SID=stdby
sqlplus / as sysdba
SQL> startup mount pfile='/home/oracle/init_stdby.txt' ;
ORACLE instance started.
Total System Global Area 440401920 bytes
Fixed Size 2021408 bytes
Variable Size 138414048 bytes
Database Buffers 297795584 bytes
Redo Buffers 2170880 bytes
Database mounted.
On the standby database, create standby redo log files; they should be the same size as the main database's online redo logs.
ALTER DATABASE ADD STANDBY LOGFILE group 4 '/opt/app/oracle/oradata/stdby/redo01_stb.log' SIZE 50M;
ALTER DATABASE ADD STANDBY LOGFILE group 5 '/opt/app/oracle/oradata/stdby/redo02_stb.log' SIZE 50M;
ALTER DATABASE ADD STANDBY LOGFILE group 6 '/opt/app/oracle/oradata/stdby/redo03_stb.log' SIZE 50M;
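A quick check that the standby redo logs were created with the expected size:
SQL> select group#, bytes/1024/1024 mb, status from v$standby_log;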
SQL> !ps -ef|grep ora_
oracle 4027 1 0 10:35 ? 00:00:00 ora_pmon_stdby
oracle 4029 1 0 10:35 ? 00:00:00 ora_psp0_stdby
oracle 4031 1 0 10:35 ? 00:00:00 ora_mman_stdby
oracle 4033 1 0 10:35 ? 00:00:00 ora_dbw0_stdby
oracle 4035 1 0 10:35 ? 00:00:00 ora_lgwr_stdby
oracle 4037 1 0 10:35 ? 00:00:00 ora_ckpt_stdby
oracle 4039 1 0 10:35 ? 00:00:00 ora_smon_stdby
oracle 4041 1 0 10:35 ? 00:00:00 ora_reco_stdby
oracle 4043 1 0 10:35 ? 00:00:00 ora_cjq0_stdby
oracle 4046 1 0 10:35 ? 00:00:00 ora_mmon_stdby
oracle 4048 1 0 10:35 ? 00:00:00 ora_mmnl_stdby
oracle 4052 1 0 10:35 ? 00:00:00 ora_d000_stdby
oracle 4055 1 0 10:35 ? 00:00:00 ora_s000_stdby
oracle 4085 1 0 10:35 ? 00:00:00 ora_arc0_stdby
oracle 4087 1 0 10:35 ? 00:00:00 ora_arc1_stdby
SQL> alter database recover managed standby database disconnect from session;
Database altered.
SQL> !ps -ef|grep ora_
oracle 4027 1 0 10:35 ? 00:00:00 ora_pmon_stdby
oracle 4029 1 0 10:35 ? 00:00:00 ora_psp0_stdby
oracle 4031 1 0 10:35 ? 00:00:00 ora_mman_stdby
oracle 4033 1 0 10:35 ? 00:00:00 ora_dbw0_stdby
oracle 4035 1 0 10:35 ? 00:00:00 ora_lgwr_stdby
oracle 4037 1 0 10:35 ? 00:00:00 ora_ckpt_stdby
oracle 4039 1 0 10:35 ? 00:00:00 ora_smon_stdby
oracle 4041 1 0 10:35 ? 00:00:00 ora_reco_stdby
oracle 4043 1 0 10:35 ? 00:00:00 ora_cjq0_stdby
oracle 4046 1 0 10:35 ? 00:00:00 ora_mmon_stdby
oracle 4048 1 0 10:35 ? 00:00:00 ora_mmnl_stdby
oracle 4052 1 0 10:35 ? 00:00:00 ora_d000_stdby
oracle 4055 1 0 10:35 ? 00:00:00 ora_s000_stdby
oracle 4085 1 0 10:35 ? 00:00:00 ora_arc0_stdby
oracle 4087 1 0 10:35 ? 00:00:00 ora_arc1_stdby
oracle 6481 1 3 11:01 ? 00:00:00 ora_mrp0_stdby <<<<<< this process will start
Now the standby is ready.
If you need to stop log apply services:
SQL> alter database recover managed standby database cancel;
===
Now start the main database:
SQL> startup pfile='/home/oracle/init_orcl.txt' ;
ORACLE instance started.
Total System Global Area 440401920 bytes
Fixed Size 2021408 bytes
Variable Size 138414048 bytes
Database Buffers 297795584 bytes
Redo Buffers 2170880 bytes
Database opened.
PROTECTION_MODE: the protection mode set for the database.
PROTECTION_LEVEL: the actual, real-time mode in effect for the database; this can change after outages/faults.
Usage Notes
SYNC and ASYNC
The redo data generated by a transaction must have been received by every enabled destination that has the SYNC attribute before that transaction can commit.
The redo data generated by a transaction need not have been received at a destination that has the ASYNC attribute before that transaction can commit. This is the default behavior if neither SYNC nor ASYNC is specified.
AFFIRM and NOAFFIRM
These attributes control whether a redo transport destination acknowledges received redo data before or after writing it to the standby redo log:
AFFIRM specifies that a redo transport destination acknowledges received redo data after writing it to the standby redo log.
NOAFFIRM specifies that a redo transport destination acknowledges received redo data before writing it to the standby redo log.
The default is AFFIRM when the SYNC attribute is specified and NOAFFIRM when the ASYNC attribute is specified.
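For illustration only (this walkthrough itself uses the simpler 'service=orcl lgwr' setting), these attributes would appear on a LOG_ARCHIVE_DEST_n setting roughly like this, using the standby TNS alias defined earlier:
-- synchronous transport, acknowledged after the write to the standby redo log
alter system set log_archive_dest_2='SERVICE=standby LGWR SYNC AFFIRM';
-- asynchronous transport, acknowledged before the write (less impact on the primary)
alter system set log_archive_dest_2='SERVICE=standby LGWR ASYNC NOAFFIRM';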
MAXIMUM AVAILABILITY is another ‘no-data-loss’ protection mode. It can be argued that it can be placed somewhere between the ‘no-data-loss’ and ‘minimal-data-loss’ category. The setup of maximum availability protection is the same as maximum protection. In MAXIMUM AVAILABILITY mode, the primary database does not halt if it cannot transmit redo data to at least one participating standby database. During this period the protection mode is switched to MAXIMUM PERFORMANCE, the lowest level of data protection.
When Data Guard is in MAXIMUM PERFORMANCE mode, data can potentially be lost if the primary database crashes and the redo logs have not been transferred to any archival destination. The protection mode is upgraded to MAXIMUM AVAILABILITY on next log switch if the log transfer service has resumed transmission of the redo data to at least one RAC Grid participating in maximum availability configuration and all the archive gap sequences have been resolved.
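A sketch of switching between protection modes on the primary (the database should be mounted, and MAXIMIZE AVAILABILITY needs a SYNC destination as described above):
alter database set standby database to maximize availability;
alter database set standby database to maximize performance;    -- back to the default
select protection_mode, protection_level from v$database;       -- verify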
* FAL_CLIENT specifies the TNS network services name for the standby database (which is sent to the FAL server process by log apply services) that should be used by the FAL server process to connect to the standby database. The syntax would be:
FAL_CLIENT=
* FAL_SERVER specifies the TNS network service name that the standby database should use to connect to the FAL server
process. The syntax would be:
FAL_SERVER=
Note that the value provided for FAL_SERVER is not limited to defining only one FAL server; it can define multiple FAL servers (possibly a FAL server running on another standby database), separated by commas as in the following example:
FAL_SERVER=primary_db,standby_db2
FAL server is a background process that is generally located on the primary database server (however it can also be found on
another standby database). FAL server is responsible for servicing incoming requests from the FAL client.
When the LGWR or ARC process on the primary initiates a connection with the standby, the standby listener responds by spawning a process called the remote file server (RFS). The RFS process will create a network connection with the processes on the primary and will sit waiting for data to arrive. Once data begins arriving from the primary, the RFS process will place it into either standby redo logs or archive redo logs. You should think of standby redo logs as exact twins to online redo logs, except for the fact that they are only active when a database is in the standby role. In essence, they are just a separate pool of redo logs. The RFS process will pick the first available standby redo log and begin placing changes into that log. When a log switch occurs on the primary, the standby switches standby redo logs and the RFS process will go to the next available standby redo log. The standby redo log that was in use prior to the log switch will be archived by the standby database and that archive will be applied by log apply services. Standby redo logs are wonderful things, and Oracle highly recommends that you use them. If you plan on setting one of the higher-level protection modes or plan on using real-time apply, the use of standby redo logs is mandatory.
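If standby redo logs are in place, real-time apply can be enabled with the 10g syntax below (a sketch; the steps above used the plain managed recovery command):
SQL> alter database recover managed standby database cancel;
SQL> alter database recover managed standby database using current logfile disconnect from session;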
Register a missing log file
alter database register physical logfile '';
If FAL doesn't work and it says the log is already registered
alter database register or replace physical logfile '';
alter database register or replace physical logfile '';
If that doesn't work, try this...
shutdown immediate
startup nomount
alter database mount standby database;
alter database recover automatic standby database;
==
Disable/Enable archive log destinations
alter system set log_archive_dest_state_2 = 'defer';
alter system set log_archive_dest_state_2 = 'enable';
SQL> SELECT PROCESS, CLIENT_PROCESS, SEQUENCE#, STATUS FROM V$MANAGED_STANDBY;
PROCESS CLIENT_P SEQUENCE# STATUS
------- -------- ---------- ------------
ARCH ARCH 0 CONNECTED
ARCH ARCH 0 CONNECTED
MRP0 N/A 204 WAIT_FOR_LOG
RFS LGWR 204 WRITING
RFS N/A 0 RECEIVING
Issue the following query to show information about the protection mode, the protection level, the role of the database, and switchover status:
set linesize 200
col DATABASE_ROLE format a20
col INSTANCE format a10
col OPEN_MODE format a15
col PROTECTION_MODE format a20
col PROTECTION_LEVEL format a20
col SWITCHOVER_STATUS format a20
SELECT DATABASE_ROLE, DB_UNIQUE_NAME INSTANCE, OPEN_MODE,PROTECTION_MODE, PROTECTION_LEVEL, SWITCHOVER_STATUS FROM V$DATABASE;
DATABASE_ROLE INSTANCE OPEN_MODE PROTECTION_MODE PROTECTION_LEVEL SWITCHOVER_STATUS
-------------------- ---------- --------------- -------------------- -------------------- --------------------
PRIMARY cccdb READ WRITE MAXIMUM PERFORMANCE MAXIMUM PERFORMANCE SESSIONS ACTIVE
Determining Which Log Files Were Not Received by the Standby Site
issue the following query on the primary database:
SQL> SELECT LOCAL.THREAD#, LOCAL.SEQUENCE# FROM
(SELECT THREAD#, SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=1) LOCAL
WHERE LOCAL.SEQUENCE# NOT IN
(SELECT SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=2 AND
THREAD# = LOCAL.THREAD#);
THREAD# SEQUENCE#
---------- ----------
1 12
1 13
1 14
To see how up to date a physical standby is, run this on the primary:
set numwidth 15
select max(sequence#) current_seq from v$log
/
Then run this on the standby
set numwidth 15
select max(applied_seq#) last_seq from v$archive_dest_status
/
Display info about all log destinations (run this on the primary):
set lines 100
set numwidth 15
column ID format 99
column "SRLs" format 99
column active format 99
col type format a4
select ds.dest_id id
, ad.status
, ds.database_mode db_mode
, ad.archiver type
, ds.recovery_mode
, ds.protection_mode
, ds.standby_logfile_count "SRLs"
, ds.standby_logfile_active active
, ds.archived_seq#
from v$archive_dest_status ds
, v$archive_dest ad
where ds.dest_id = ad.dest_id
and ad.status != 'INACTIVE'
order by ds.dest_id
/
List any standby redo logs
set lines 100 pages 999
col member format a70
select st.group#
, st.sequence#
, ceil(st.bytes / 1048576) mb
, lf.member
from v$standby_log st
, v$logfile lf
where st.group# = lf.group#
/
Check which logs are missing (run this on the standby):
select local.thread#
, local.sequence# from
(select thread#
, sequence#
from v$archived_log
where dest_id=1) local
where local.sequence# not in
(select sequence#
from v$archived_log
where dest_id=2 and
thread# = local.thread#)
/
SQL> select name, open_mode, protection_mode, database_role, dataguard_broker, force_logging from v$database;
Dynamic SQL to generate ADD STANDBY LOGFILE statements matching the existing online redo logs:
select 'alter database add standby logfile '''||
regexp_substr(MEMBER,'/.+/')||'stdby_'||
regexp_replace(member,regexp_substr(MEMBER,'/.+/'),'')||
''' size '||bytes||';' "Create Standby redo"
from v$logfile lf , v$log l
where l.group# = lf.group#
/
Dynamic SQL which also generates an extra standby redo log following the naming convention stdby_redo0<group#>.rdo:
======================================================
select 'alter database add standby logfile '''||
regexp_substr(MEMBER,'/.+/')||'stdby_'||
regexp_replace(member,regexp_substr(MEMBER,'/.+/'),'')||
''' size '||bytes||';' "Create Standby redo"
from v$logfile lf , v$log l
where l.group# = lf.group#
union all
select 'alter database add standby logfile '''||
regexp_substr(MEMBER,'/.+/')||'stdby_redo0'||(select max(group#)+1 from v$log)||'.rdo'||
''' size '||bytes||';' "Create Standby redo"
from v$logfile lf , v$log l
where l.group# = lf.group#
and rownum <=2
/
Please check this one: https://anuj-singh.blogspot.com/2017/12/rman-standby-on-same-host-from-backup.html
Creating a physical standby database on same host
Creating a physical standby database on same host
Oracle provides two types of standby databases:
1. Physical Standby Database
Archived redo log transferred from primary database will be directly applied to the standby database.
2. Logical Standby Database
we create SQL statements then these statements will be applied to stand by database.
The key advantage for logical standby databases is that they're opened read/write, even while they're in applied mode
It is important to identify unsupported database objects on the primary database before you create a logical standby database.
This is because changes made to unsupported datatypes, table, sequences,
or views on the primary database will not be propagated to
the logical standby database. Moreover, no error message will be returned.
SELECT DISTINCT OWNER,TABLE_NAME FROM DBA_LOGSTDBY_UNSUPPORTED ORDER BY OWNER,TABLE_NAME;
SELECT COLUMN_NAME,DATA_TYPE FROM DBA_LOGSTDBY_UNSUPPORTED WHERE OWNER='XXX' AND TABLE_NAME = 'XXXXX';
==================
We are creating physical standby database on same host
orcl instance is up and running on archive log mode .
init ora file for orcl instance
=================================
[apt-amd-02:oracle:orcl]$ cat init_orcl.txt
orcl.__db_cache_size=297795584
orcl.__java_pool_size=4194304
orcl.__large_pool_size=4194304
orcl.__shared_pool_size=130023424
orcl.__streams_pool_size=0
*.audit_file_dest='/opt/app/oracle/admin/orcl/adump'
*.background_dump_dest='/opt/app/oracle/admin/orcl/bdump'
*.compatible='10.2.0.1.0'
*.control_files='/opt/app/oracle/oradata/orcl/orcl/control01.ctl','/opt/app/oracle/oradata/orcl/orcl/control02.ctl','/opt/app/oracle/oradata/orcl/orcl/control03.ctl'
*.core_dump_dest='/opt/app/oracle/admin/orcl/cdump'
*.db_block_size=8192
*.db_domain=''
*.db_file_multiblock_read_count=16
*.db_name='orcl'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=orclXDB)'
*.job_queue_processes=10
*.nls_language='ENGLISH'
*.nls_territory='UNITED KINGDOM'
*.open_cursors=300
*.pga_aggregate_target=146800640
*.processes=150
*.remote_login_passwordfile='EXCLUSIVE'
*.sga_target=440401920
*.undo_management='AUTO'
*.undo_tablespace='UNDOTBS1'
*.user_dump_dest='/opt/app/oracle/admin/orcl/udump'
*.log_archive_dest_1='LOCATION=/opt/app/oracle/archive/ MANDATORY'
*.log_archive_dest_2='service=orcl lgwr'
*.FAL_SERVER='standby'
*.FAL_CLIENT='orcl'
*.STANDBY_FILE_MANAGEMENT=AUTO
*.remote_login_passwordfile=EXCLUSIVE
========================================
*.log_archive_dest_2='service=orcl lgwr'
create following services in tnsnames.ora file
orcl =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.2.251)(PORT = 1521))
)
(CONNECT_DATA =
(SERVICE_NAME = stdby)
)
)
*.FAL_SERVER='standby'
standby =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.2.251)(PORT = 1521))
)
(CONNECT_DATA =
(SERVICE_NAME = stdby)
)
)
====================================
create init ora file for standby database
and create following directories as mention in init ora file
audit_file_dest
background_dump_dest
control_files
core_dump_dest
user_dump_dest
log_archive_dest_1
[apt-amd-02:oracle:stdby]$ cat init_stdby.txt
orcl.__db_cache_size=377487360
orcl.__java_pool_size=4194304
orcl.__large_pool_size=4194304
orcl.__shared_pool_size=146800640
orcl.__streams_pool_size=0
*.audit_file_dest='/opt/app/oracle/admin/stdby/adump'
*.background_dump_dest='/opt/app/oracle/admin/stdby/bdump'
*.compatible='10.2.0.1.0'
*.control_files='/opt/app/oracle/oradata/stdby/control_stdby.ctl'
*.core_dump_dest='/opt/app/oracle/admin/stdby/cdump'
*.db_block_size=8192
*.db_domain=''
*.db_file_multiblock_read_count=16
*.db_name='orcl'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=orclXDB)'
*.job_queue_processes=10
*.nls_language='ENGLISH'
*.nls_territory='UNITED KINGDOM'
*.open_cursors=300
*.pga_aggregate_target=199229440
*.processes=150
*.remote_login_passwordfile='EXCLUSIVE'
*.sga_target=536870912
*.undo_management='AUTO'
*.undo_tablespace='UNDOTBS1'
*.user_dump_dest='/opt/app/oracle/admin/stdby/udump'
*.log_archive_dest_1='LOCATION=/opt/app/oracle/archive MANDATORY'
*.db_file_name_convert='/opt/app/oracle/oradata/orcl/orcl' '/opt/app/oracle/oradata/stdby'
*.log_file_name_convert='/opt/app/oracle/oradata/orcl/orcl' '/opt/app/oracle/oradata/stdby'
*.fal_client='standby'
*.fal_server=''
*.instance_name='stdby'
*.DB_UNIQUE_NAME=stdby
*.remote_login_passwordfile=EXCLUSIVE
==================================
cd /opt/app/oracle/product/10.2/db/dbs
then create password file for each instance
orapwd file=orapworcl password=orcl entries=5 force=y
orapwd file=orapwstdby password=stdby entries=5 force=y
SQL> startup mount pfile='/home/oracle/init_orcl.txt' ;
ORACLE instance started.
Total System Global Area 440401920 bytes
Fixed Size 2021408 bytes
Variable Size 138414048 bytes
Database Buffers 297795584 bytes
Redo Buffers 2170880 bytes
Database mounted.
sql>alter database create standby controlfile as '/opt/app/oracle/oradata/stdby/control_stdby.ctl' ;
shutdown the main database i.e. orcl
and copy all the file to ....
/opt/app/oracle/oradata/orcl> cp *.* /opt/app/oracle/oradata/stdby/
Now time to start standby database .
ORACLE_SID=stdby
sqlplus / as sysdba
SQL> startup mount pfile='/home/oracle/init_stdby.txt' ;
ORACLE instance started.
Total System Global Area 440401920 bytes
Fixed Size 2021408 bytes
Variable Size 138414048 bytes
Database Buffers 297795584 bytes
Redo Buffers 2170880 bytes
Database mounted.
on standby database create standby logfiles it should be same as main database
ALTER DATABASE ADD STANDBY LOGFILE group 4 '/opt/app/oracle/oradata/stdby/redo01_stb.log' SIZE 50M;
ALTER DATABASE ADD STANDBY LOGFILE group 5 '/opt/app/oracle/oradata/stdby/redo02_stb.log' SIZE 50M;
ALTER DATABASE ADD STANDBY LOGFILE group 6 '/opt/app/oracle/oradata/stdby/redo03_stb.log' SIZE 50M;
SQL> !ps -ef|grep ora_
oracle 4027 1 0 10:35 ? 00:00:00 ora_pmon_stdby
oracle 4029 1 0 10:35 ? 00:00:00 ora_psp0_stdby
oracle 4031 1 0 10:35 ? 00:00:00 ora_mman_stdby
oracle 4033 1 0 10:35 ? 00:00:00 ora_dbw0_stdby
oracle 4035 1 0 10:35 ? 00:00:00 ora_lgwr_stdby
oracle 4037 1 0 10:35 ? 00:00:00 ora_ckpt_stdby
oracle 4039 1 0 10:35 ? 00:00:00 ora_smon_stdby
oracle 4041 1 0 10:35 ? 00:00:00 ora_reco_stdby
oracle 4043 1 0 10:35 ? 00:00:00 ora_cjq0_stdby
oracle 4046 1 0 10:35 ? 00:00:00 ora_mmon_stdby
oracle 4048 1 0 10:35 ? 00:00:00 ora_mmnl_stdby
oracle 4052 1 0 10:35 ? 00:00:00 ora_d000_stdby
oracle 4055 1 0 10:35 ? 00:00:00 ora_s000_stdby
oracle 4085 1 0 10:35 ? 00:00:00 ora_arc0_stdby
oracle 4087 1 0 10:35 ? 00:00:00 ora_arc1_stdby
SQL> alter database recover managed standby database disconnect from session;
Database altered.
SQL> !ps -ef|grep ora_
oracle 4027 1 0 10:35 ? 00:00:00 ora_pmon_stdby
oracle 4029 1 0 10:35 ? 00:00:00 ora_psp0_stdby
oracle 4031 1 0 10:35 ? 00:00:00 ora_mman_stdby
oracle 4033 1 0 10:35 ? 00:00:00 ora_dbw0_stdby
oracle 4035 1 0 10:35 ? 00:00:00 ora_lgwr_stdby
oracle 4037 1 0 10:35 ? 00:00:00 ora_ckpt_stdby
oracle 4039 1 0 10:35 ? 00:00:00 ora_smon_stdby
oracle 4041 1 0 10:35 ? 00:00:00 ora_reco_stdby
oracle 4043 1 0 10:35 ? 00:00:00 ora_cjq0_stdby
oracle 4046 1 0 10:35 ? 00:00:00 ora_mmon_stdby
oracle 4048 1 0 10:35 ? 00:00:00 ora_mmnl_stdby
oracle 4052 1 0 10:35 ? 00:00:00 ora_d000_stdby
oracle 4055 1 0 10:35 ? 00:00:00 ora_s000_stdby
oracle 4085 1 0 10:35 ? 00:00:00 ora_arc0_stdby
oracle 4087 1 0 10:35 ? 00:00:00 ora_arc1_stdby
oracle 6481 1 3 11:01 ? 00:00:00 ora_mrp0_stdby <<<<<<---- this process will start
now standby is ready
If you need to stop log apply services:
SQL> alter database recover managed standby database cancel;
===
now start the main database
SQL> startup pfile='/home/oracle/init_orcl.txt' ;
ORACLE instance started.
Total System Global Area 440401920 bytes
Fixed Size 2021408 bytes
Variable Size 138414048 bytes
Database Buffers 297795584 bytes
Redo Buffers 2170880 bytes
Database opened.
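Once the primary is open, a quick way to confirm redo is reaching the standby (assuming log_archive_dest_2 points at the standby service and is enabled) is to force a log switch on the primary:
SQL> alter system switch logfile;
and then check what has been applied on the standby:
SQL> select max(sequence#) from v$archived_log where applied = 'YES';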
Usage Notes
PROTECTION_MODE : the protection mode set for the database
PROTECTION_LEVEL : the protection level actually in effect for the database; this can change after outages/faults
SYNC - the redo data generated by a transaction must have been received by every enabled destination that has the SYNC attribute before that transaction can commit.
ASYNC - the redo data generated by a transaction need not have been received at a destination that has the ASYNC attribute before that transaction can commit. This is the default behavior if neither SYNC nor ASYNC is specified.
AFFIRM and NOAFFIRM
Control whether a redo transport destination acknowledges received redo data before or after writing it to the standby redo log:
AFFIRM - the destination acknowledges received redo data only after writing it to the standby redo log.
NOAFFIRM - the destination acknowledges received redo data before writing it to the standby redo log.
The default is AFFIRM when the SYNC attribute is specified and NOAFFIRM when the ASYNC attribute is specified.
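As an illustration only (not the exact destination setting used earlier in this post, and assuming a TNS alias called standby), a synchronous, affirmed destination would be written in the primary's init file as:
*.log_archive_dest_2='SERVICE=standby LGWR SYNC AFFIRM'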
MAXIMUM AVAILABILITY is another 'no-data-loss' protection mode; it can be argued that it sits somewhere between the 'no-data-loss' and 'minimal-data-loss' categories. The setup of maximum availability protection is the same as maximum protection. In MAXIMUM AVAILABILITY mode, however, the primary database does not halt if it cannot transmit redo data to at least one participating standby database. During such a period the protection level drops to MAXIMUM PERFORMANCE, the lowest level of data protection.
When Data Guard is running at the MAXIMUM PERFORMANCE level, data can potentially be lost if the primary database crashes and the redo has not been transferred to any archival destination. The protection level is upgraded back to MAXIMUM AVAILABILITY at the next log switch, once the log transport service has resumed transmission of redo to at least one standby participating in the maximum availability configuration and all archive gap sequences have been resolved.
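For reference only, the protection mode itself is changed on the primary, for example (in 10g this is typically done with the primary in the mount state):
SQL> alter database set standby database to maximize availability;
MAXIMUM AVAILABILITY expects a SYNC (LGWR SYNC AFFIRM) destination and standby redo logs on the standby.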
* FAL_CLIENT specifies the TNS network service name of the standby database (which is sent to the FAL server process by the log apply services) that the FAL server should use to connect back to the standby database. The syntax is:
FAL_CLIENT=<net_service_name_of_standby>
* FAL_SERVER specifies the TNS network service name that the standby database should use to connect to the FAL server
process. The syntax is:
FAL_SERVER=<net_service_name_of_FAL_server>
Note that the value provided for FAL_SERVER is not limited to a single FAL server; it can define multiple FAL servers (possibly a FAL server running on another standby database), separated by commas, as in the following example:
FAL_SERVER=primary_db,standby_db2
The FAL server is a background process that generally runs on the primary database server (although it can also run on another standby database). The FAL server is responsible for servicing incoming requests from the FAL client.
When the LGWR or ARCn process on the primary initiates a connection with the standby, the standby listener responds by spawning a process called the remote file server (RFS). The RFS process creates a network connection with the processes on the primary and sits waiting for data to arrive. Once data begins arriving from the primary, the RFS process places it into either standby redo logs or archived redo logs. You should think of standby redo logs as exact twins of online redo logs, except that they are only active when a database is in the standby role; in essence, they are just a separate pool of redo logs. The RFS process picks the first available standby redo log and begins placing changes into that log. When a log switch occurs on the primary, the standby switches standby redo logs and the RFS process moves to the next available standby redo log. The standby redo log that was in use prior to the log switch is archived by the standby database, and that archive is applied by the log apply services. Standby redo logs are wonderful things, and Oracle highly recommends that you use them. If you plan on setting one of the higher-level protection modes or plan on using real-time apply, the use of standby redo logs is mandatory.
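Since real-time apply is mentioned above: with standby redo logs in place, managed recovery can be started in real-time apply mode instead of the plain managed recovery used earlier in this post, e.g.:
SQL> alter database recover managed standby database using current logfile disconnect from session;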
Register a missing log file:
alter database register physical logfile '<full_path_of_archived_log>';
If FAL doesn't work and it says the log is already registered:
alter database register or replace physical logfile '<full_path_of_archived_log>';
If that doesn't work, try this...
shutdown immediate
startup nomount
alter database mount standby database;
alter database recover automatic standby database;
==
Disable/Enable archive log destinations
alter system set log_archive_dest_state_2 = 'defer';
alter system set log_archive_dest_state_2 = 'enable';
SQL> SELECT PROCESS, CLIENT_PROCESS, SEQUENCE#, STATUS FROM V$MANAGED_STANDBY;
PROCESS CLIENT_P SEQUENCE# STATUS
------- -------- ---------- ------------
ARCH ARCH 0 CONNECTED
ARCH ARCH 0 CONNECTED
MRP0 N/A 204 WAIT_FOR_LOG
RFS LGWR 204 WRITING
RFS N/A 0 RECEIVING
Issue the following query to show information about the protection mode, the protection level, the role of the database, and switchover status:
set linesize 200
col DATABASE_ROLE format a20
col INSTANCE format a10
col OPEN_MODE format a15
col PROTECTION_MODE format a20
col PROTECTION_LEVEL format a20
col SWITCHOVER_STATUS format a20
SELECT DATABASE_ROLE, DB_UNIQUE_NAME INSTANCE, OPEN_MODE,PROTECTION_MODE, PROTECTION_LEVEL, SWITCHOVER_STATUS FROM V$DATABASE;
DATABASE_ROLE INSTANCE OPEN_MODE PROTECTION_MODE PROTECTION_LEVEL SWITCHOVER_STATUS
-------------------- ---------- --------------- -------------------- -------------------- --------------------
PRIMARY cccdb READ WRITE MAXIMUM PERFORMANCE MAXIMUM PERFORMANCE SESSIONS ACTIVE
Determining Which Log Files Were Not Received by the Standby Site
issue the following query on the primary database:
SQL> SELECT LOCAL.THREAD#, LOCAL.SEQUENCE# FROM
(SELECT THREAD#, SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=1) LOCAL
WHERE LOCAL.SEQUENCE# NOT IN
(SELECT SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=2 AND
THREAD# = LOCAL.THREAD#);
THREAD# SEQUENCE#
---------- ----------
1 12
1 13
1 14
To see how up to date a physical standby is, run this on the primary:
set numwidth 15
select max(sequence#) current_seq from v$log
/
Then run this on the standby
set numwidth 15
select max(applied_seq#) last_seq from v$archive_dest_status
/
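A related optional check on the standby is to ask for any detected archive gap (no rows returned means no gap):
SQL> select * from v$archive_gap;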
Display info about all log destinations; to be run on the primary:
set lines 100
set numwidth 15
column ID format 99
column "SRLs" format 99
column active format 99
col type format a4
select ds.dest_id id
, ad.status
, ds.database_mode db_mode
, ad.archiver type
, ds.recovery_mode
, ds.protection_mode
, ds.standby_logfile_count "SRLs"
, ds.standby_logfile_active active
, ds.archived_seq#
from v$archive_dest_status ds
, v$archive_dest ad
where ds.dest_id = ad.dest_id
and ad.status != 'INACTIVE'
order by ds.dest_id
/
List any standby redo logs
set lines 100 pages 999
col member format a70
select st.group#
, st.sequence#
, ceil(st.bytes / 1048576) mb
, lf.member
from v$standby_log st
, v$logfile lf
where st.group# = lf.group#
/
Check which logs are missing; run this on the standby...
select local.thread#
, local.sequence# from
(select thread#
, sequence#
from v$archived_log
where dest_id=1) local
where local.sequence# not in
(select sequence#
from v$archived_log
where dest_id=2 and
thread# = local.thread#)
/
SQL> select name, open_mode, protection_mode, database_role, dataguard_broker, force_logging from v$database;
Oracle provides two types of standby databases:
1. Physical Standby Database
Archived redo logs transferred from the primary database are applied directly to the standby database.
2. Logical Standby Database
The redo is converted into SQL statements, and these statements are then applied to the standby database.
The key advantage of logical standby databases is that they are open read/write even while SQL apply is running.
It is important to identify unsupported database objects on the primary database before you create a logical standby database.
This is because changes made to unsupported datatypes, tables, sequences,
or views on the primary database will not be propagated to
the logical standby database. Moreover, no error message will be returned.
SELECT DISTINCT OWNER,TABLE_NAME FROM DBA_LOGSTDBY_UNSUPPORTED ORDER BY OWNER,TABLE_NAME;
SELECT COLUMN_NAME,DATA_TYPE FROM DBA_LOGSTDBY_UNSUPPORTED WHERE OWNER='XXX' AND TABLE_NAME = 'XXXXX';
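A related check before building a logical standby is for tables whose rows cannot be uniquely identified (no primary key or non-null unique index); one way to list them, using DBA_LOGSTDBY_NOT_UNIQUE, is:
SELECT OWNER, TABLE_NAME FROM DBA_LOGSTDBY_NOT_UNIQUE
WHERE (OWNER, TABLE_NAME) NOT IN
(SELECT DISTINCT OWNER, TABLE_NAME FROM DBA_LOGSTDBY_UNSUPPORTED);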
==================
Friday, 9 October 2009
Oracle statistics for the CBO (gathering statistics)
Optimizer statistics for use by the Cost Based Optimizer (CBO)
To achieve a quick delete and recreate of the statistics on an individual table and its indexes
(adding column statistics for any skewed columns):
to delete:
exec dbms_stats.delete_table_stats(ownname=>'user_name',tabname=>'table_name',cascade_indexes=>true);
then to regather:
exec dbms_stats.gather_table_stats(ownname=>'user_name',-
tabname=>'table_name',-
estimate_percent => 100,-
cascade=>true,-
method_opt=>'for all columns size skewonly');
To generate statistics with as much accuracy as possible, 100% sample sizes are suggested, since any reduction in sample size is always a concession to accuracy.
100% samples are potentially time consuming, however, so consideration needs to be given to fitting the statistics gathering activity within the existing maintenance window, for example:
exec dbms_stats.gather_table_stats( -
ownname => 'Schema_name', -
tabname => 'Table_name', -
estimate_percent => 100, -
cascade => TRUE, -
method_opt => 'FOR ALL COLUMNS SIZE 1' );
Parameter defaults:
METHOD_OPT - 9i: "FOR ALL COLUMNS SIZE 1" (effectively no detailed column statistics); 10g/11g: "FOR ALL COLUMNS SIZE AUTO".
ESTIMATE_PERCENT - 10g: DBMS_STATS.AUTO_SAMPLE_SIZE (uses a very small estimate percentage); 11g: DBMS_STATS.AUTO_SAMPLE_SIZE (uses a larger, more accurate sample, close to 100%).
For cases where column data is known to be skewed and column statistics are known to be beneficial, Replace:
method_opt => 'FOR ALL COLUMNS SIZE 1'
with
method_opt => 'FOR ALL COLUMNS SIZE AUTO'
or with
method_opt => 'FOR ALL COLUMNS SIZE SKEWONLY'
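For completeness, the same idea can be applied at schema level (the schema name below is a placeholder), letting Oracle choose the sample size and histogram columns:
exec dbms_stats.gather_schema_stats( -
ownname => 'SCHEMA_NAME', -
estimate_percent => dbms_stats.auto_sample_size, -
cascade => TRUE, -
method_opt => 'FOR ALL COLUMNS SIZE AUTO');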
Wednesday, 7 October 2009
Startup Oracle 10g and 11g Database In Archivelog Mode and flashback mode
In this case "orcl" is the instance name.
-bash-3.00$ sqlplus / as sysdba
SQL*Plus: Release 10.2.0.4.0 - Production on Wed Oct 7 09:45:59 2009
Copyright (c) 1982, 2007, Oracle. All Rights Reserved.
Connected to an idle instance.
SYS AS SYSDBA>!echo $ORACLE_SID
orcl
sql> create pfile='/tmp/init_orcl.txt' from spfile ;
sql> shutdown immediate;
Then edit this file: vi /tmp/init_orcl.txt
==============================================
*.log_archive_format="log%r_%t_%s.arc"
-- <<<<< in Oracle 10g and above this parameter must include the %r, %t and %s values
*.log_archive_dest_1='LOCATION=/opt/oracle/archive'
*.db_recovery_file_dest='/opt/oracle/flash_recovery_area'
*.db_recovery_file_dest_size="2G"
*.db_flashback_retention_target="1440"
===============================================
LOG_ARCHIVE_FORMAT must include the %s, %t and %r specifiers:
%s log sequence number
%S log sequence number, zero filled
%t Thread number
%T Thread number, zero filled
%a activation ID
%d database ID
%r resetlogs ID that ensures unique names are constructed for the archived log files across multiple incarnations of the database
SQL> startup mount pfile='/tmp/init_orcl.txt' ;
ORACLE instance started.
Total System Global Area 2634022912 bytes
Fixed Size 2042912 bytes
Variable Size 385881056 bytes
Database Buffers 2231369728 bytes
Redo Buffers 14729216 bytes
Database mounted.
sql> alter database archivelog;
Database altered.
sql>alter database flashback on;
Database altered.
sql> alter database open;
Database altered.
SYS AS SYSDBA>archive log list
Database log mode Archive Mode
Automatic archival Enabled
Archive destination /opt/oracle/archive
Oldest online log sequence 205
Next log sequence to archive 207
Current log sequence 207
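The same state can be cross-checked with a quick optional query against v$database:
SQL> select log_mode, flashback_on from v$database;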
sql> alter system switch logfile;
System altered.
SQL> alter system switch logfile;
System altered.
bash-3.00$ cd /opt/oracle/archive/
bash-3.00$ ls -lt
-rw-r----- 1 oracle dba 1024 Oct 7 09:47 log699206679_1_206.arc
-rw-r----- 1 oracle dba 138240 Oct 7 09:47 log699206679_1_205.arc
to check the flashback files
db_recovery_file_dest='/opt/oracle/flash_recovery_area'
go to
cd /opt/oracle/flash_recovery_area/
then you will see the ORCL directory and, inside it, the flashback directory
$cd ORCL
then
$cd flashback
finally
$pwd
/opt/oracle/flash_recovery_area/ORCL/flashback
-bash-3.00$ ls -lt
total 31152
-rw-r----- 1 oracle dba 15941632 Oct 7 09:49 o1_mf_5drnyvfc_.flb
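An optional check of how much flashback log space is in use, and the estimated size needed for the retention target:
SQL> select retention_target, flashback_size, estimated_flashback_size from v$flashback_database_log;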
then create the spfile from the edited pfile
sql> create spfile from pfile='/tmp/init_orcl.txt' ;
File created.