The steps below explain how to execute a shell script from a stored procedure using DBMS_SCHEDULER.
Root access is required for the steps below.
1. Ensure the configuration file $ORACLE_HOME/rdbms/admin/externaljob.ora is owned by root:
cd $ORACLE_HOME/rdbms/admin/
chown root externaljob.ora
$ ls -l externaljob.ora
-rw-r----- 1 root centos 1534 Dec 22 2005 externaljob.ora
2. Ensure the file permissions are correctly set for $ORACLE_HOME/rdbms/admin/externaljob.ora.
Remove write privileges from group and other.
chmod 640 externaljob.ora
ls -la externaljob.ora
-rw-r----- 1 root oinstall 1537 Sep 13 09:24 externaljob.ora
3. Edit $ORACLE_HOME/rdbms/admin/externaljob.ora and set run_user to the OS account that owns the Oracle installation and the database and run_group to the OS group that owns the Oracle_Home.
run_user = oracle
run_group = dba
4. Ensure the setuid bit is set on the $ORACLE_HOME/bin/extjob executable.
cd $ORACLE_HOME/bin
chmod 4750 extjob
$ ls -l extjob
-rwsr-x--- 1 root centos 64911 Jun 17 01:43 extjob
5. Run below procedure
Connect / as sysdba
-- Creates a directory at OS level by running an external shell script
-- (/oracle/shell/createdir.sh) through DBMS_SCHEDULER.
--   p_path     : parent directory under which to create the new directory
--   p_filename : name of the directory to create
create or replace procedure PrcCreateOsDir(p_path IN VARCHAR2, p_filename IN VARCHAR2)
is
  v_object_job number := 0;
  v_object_prg number := 0;
begin
  -- Check whether the scheduler job/program already exist for this schema.
  select count(1) into v_object_job from user_objects where object_name = 'CREATEDIRJOB';
  select count(1) into v_object_prg from user_objects where object_name = 'CREATEDIRPROG';

  IF v_object_prg = 0 THEN
    -- Create the program disabled, define its arguments, then enable it.
    -- Program arguments may only be defined while the program is disabled,
    -- so this must happen once at creation time: the original code called
    -- define_program_argument on every invocation, which fails on the
    -- second call because the program is already enabled.
    DBMS_SCHEDULER.create_program(program_name        => 'createdirprog',
                                  program_type        => 'EXECUTABLE',
                                  program_action      => '/oracle/shell/createdir.sh',
                                  number_of_arguments => 2,
                                  enabled             => FALSE,
                                  comments            => 'Program to create directory at host level.');
    DBMS_SCHEDULER.define_program_argument(program_name      => 'createdirprog',
                                           argument_name     => 'path',
                                           argument_position => 1,
                                           argument_type     => 'VARCHAR2');
    DBMS_SCHEDULER.define_program_argument(program_name      => 'createdirprog',
                                           argument_name     => 'filename',
                                           argument_position => 2,
                                           argument_type     => 'VARCHAR2');
    DBMS_SCHEDULER.ENABLE(name => 'createdirprog');
  END IF;

  IF v_object_job = 0 THEN
    DBMS_SCHEDULER.CREATE_JOB(job_name     => 'createdirjob',
                              program_name => 'createdirprog');
  END IF;

  -- Bind the caller's values and run the job immediately.
  DBMS_SCHEDULER.set_job_argument_value(job_name       => 'createdirjob',
                                        argument_name  => 'path',
                                        argument_value => p_path);
  DBMS_SCHEDULER.set_job_argument_value(job_name       => 'createdirjob',
                                        argument_name  => 'filename',
                                        argument_value => p_filename);
  DBMS_SCHEDULER.run_job(job_name => 'createdirjob');
end;
/
6. Shell script to create directory
$cat createdir.sh
#!/bin/bash
# createdir.sh -- invoked by the DBMS_SCHEDULER external job with two
# arguments: $1 = parent path, $2 = directory name to create under it.
# Quote the expansion so paths containing spaces do not word-split.
mkdir "$1/$2"
7. Execute procedure to create directory
SQL> exec PrcCreateOsDir('/oracle/data','mydir');
PL/SQL procedure successfully completed.
Tuesday, July 15, 2008
Multithreaded Export and Import
The below script used for doing multithreaded export and import with list of tables specified in tablelist file
$ cat initiate
#!/bin/bash
# initiate -- driver for the multithreaded export/import.
# Prompts for the number of export/import worker pairs, recreates the work
# directories, and launches ./export and ./import workers in the background.

# Recreate every work directory from scratch so a previous run's state
# (done/lock markers, dumps, logs) cannot leak into this run.
cleanup()
{
    local dir
    for dir in dumpdir expdone impdone explock implock explog implog expproglog impproglog
    do
        rm -fr "$dir"
        mkdir "$dir"
    done
}

# Start $pid pairs of background workers; each worker receives its numeric
# id as $1 and writes its progress log under expproglog/impproglog.
createproc()
{
    i=1
    while [ "$i" -le "$pid" ]
    do
        ./export "$i" > "${PWD}/expproglog/export_$i.log" &
        echo " Export started with process id $i"
        sleep 2
        ./import "$i" > "${PWD}/impproglog/import_$i.log" &
        echo " Import started with process id $i"
        sleep 2
        i=$((i+1))
    done
}

echo "Enter number of export & import process to start.. "
read pid
echo "Doing cleanup of dump and log directories.."
cleanup
sleep 2
echo "Cleanup done.."
createproc
echo " Export and import process started, Check the log for progress"
$cat export
#!/bin/bash
# export -- worker script: exports each table listed in ./tablelist via exp.
# Usage: ./export <worker-id>
# Coordination with the other workers is done through marker files:
#   expdone/expdone.<table>  -- table already exported
#   explock/explock.<table>  -- another worker is exporting it right now

# Echo prefixed with a timestamp, for progress logging.
dateecho()
{
    datevar=`date '+%Y/%m/%d %H:%M:%S'`
    echo "$datevar $*"
}

# One pass over tablelist: export every table that is neither done nor
# locked by another worker.  (Renamed from `export` so the shell builtin
# of the same name is not shadowed.)
export_tables()
{
    j=`wc -l tablelist | awk ' { print $1 } '`
    # Open the table list on file descriptor 3 so commands inside the loop
    # body cannot steal lines from it.  The original `exec 3` had lost the
    # redirection, leaving fd 3 unopened.
    exec 3< tablelist
    while [ $j -gt 0 ]
    do
        read <&3 tableiden
        table=`echo $tableiden | awk ' { print $1 } '`
        iden=`echo $tableiden | awk ' { print $2 } '`
        if [ -e "${PWD}/expdone/expdone.${table}" ] ; then
            SKIP=Y        # already exported
        elif [ -e "${PWD}/explock/explock.${table}" ] ; then
            SKIP=Y        # another worker owns it
        else
            SKIP=N
        fi
        if [ "$SKIP" = "N" ] ; then
            touch ${PWD}/explock/explock.${table}
            dateecho "About to start export of table $table.."
            exp user1/user1@prod1 file=${PWD}/dumpdir/exp_$table.dmp log=${PWD}/explog/exp_$table.log tables=$table > /dev/null 2>&1
            sleep 2
            touch ${PWD}/expdone/expdone.${table}
            rm -f ${PWD}/explock/explock.${table}
            dateecho "completed export of table $table.."
        fi
        SKIP=N
        j=$((j-1))
    done
    exec 3<&-   # close the list so the next pass rereads from the top
}

proc=$1
dateecho "Export process started with id $1.."
# Keep sweeping the table list until a done-marker exists for every table.
while [ 1 ]
do
    count=`wc -l tablelist | awk ' { print $1 } '`
    counttask=`ls -ltr ${PWD}/expdone | grep -v total | wc -l`
    if [ $count != $counttask ] ; then
        export_tables
    else
        dateecho "Export process $proc completed successfully.."
        exit 1
    fi
done
$ cat import
#!/bin/bash
# import -- worker script: imports each table already exported by the
# export workers.  Usage: ./import <worker-id>
# Marker files coordinate the workers:
#   expdone/expdone.<table>  -- export finished, table is ready to import
#   impdone/impdone.<table>  -- table already imported
#   implock/implock.<table>  -- another worker is importing it right now

# Echo prefixed with a timestamp, for progress logging.
dateecho()
{
    datevar=`date '+%Y/%m/%d %H:%M:%S'`
    echo "$datevar $*"
}

# One pass over tablelist: import every table whose export is done and
# which is neither imported already nor locked by another worker.
# (Renamed from `import` for symmetry with the export worker.)
import_tables()
{
    j=`wc -l tablelist | awk ' { print $1 } '`
    # Open the table list on file descriptor 3 so commands inside the loop
    # body cannot steal lines from it.  The original `exec 3` had lost the
    # redirection, leaving fd 3 unopened.
    exec 3< tablelist
    while [ $j -gt 0 ]
    do
        read <&3 tableiden
        table=`echo $tableiden | awk ' { print $1 } '`
        iden=`echo $tableiden | awk ' { print $2 } '`
        if [ -e "${PWD}/expdone/expdone.${table}" ] ; then
            if [ -e "${PWD}/impdone/impdone.${table}" ] ; then
                SKIP=Y        # already imported
            elif [ -e "${PWD}/implock/implock.${table}" ] ; then
                SKIP=Y        # another worker owns it
            else
                SKIP=N
            fi
        else
            SKIP=Y            # export not finished yet
        fi
        if [ "$SKIP" = "N" ] ; then
            touch ${PWD}/implock/implock.${table}
            dateecho "About to start import of table $table.."
            imp user2/user2@prod2 file=${PWD}/dumpdir/exp_$table.dmp log=${PWD}/implog/imp_$table.log full=y > /dev/null 2>&1
            sleep 2
            touch ${PWD}/impdone/impdone.${table}
            rm -f ${PWD}/implock/implock.${table}
            dateecho "Completed import of table $table.."
        fi
        SKIP=N
        j=$((j-1))
    done
    exec 3<&-   # close the list so the next pass rereads from the top
}

proc=$1
dateecho "Import process started with id $1.."
# Keep sweeping until a done-marker exists for every table in the list.
while [ 1 ]
do
    count=`wc -l tablelist | awk ' { print $1 } '`
    counttask=`ls -ltr ${PWD}/impdone | grep -v total | wc -l`
    if [ $count != $counttask ] ; then
        import_tables
    else
        dateecho "Import process $proc completed successfully.."
        exit 1
    fi
done
$ cat initiate
#!/bin/bash
# initiate -- driver for the multithreaded export/import.
# Prompts for the number of export/import worker pairs, recreates the work
# directories, and launches ./export and ./import workers in the background.

# Recreate every work directory from scratch so a previous run's state
# (done/lock markers, dumps, logs) cannot leak into this run.
cleanup()
{
    local dir
    for dir in dumpdir expdone impdone explock implock explog implog expproglog impproglog
    do
        rm -fr "$dir"
        mkdir "$dir"
    done
}

# Start $pid pairs of background workers; each worker receives its numeric
# id as $1 and writes its progress log under expproglog/impproglog.
createproc()
{
    i=1
    while [ "$i" -le "$pid" ]
    do
        ./export "$i" > "${PWD}/expproglog/export_$i.log" &
        echo " Export started with process id $i"
        sleep 2
        ./import "$i" > "${PWD}/impproglog/import_$i.log" &
        echo " Import started with process id $i"
        sleep 2
        i=$((i+1))
    done
}

echo "Enter number of export & import process to start.. "
read pid
echo "Doing cleanup of dump and log directories.."
cleanup
sleep 2
echo "Cleanup done.."
createproc
echo " Export and import process started, Check the log for progress"
$cat export
#!/bin/bash
# export -- worker script: exports each table listed in ./tablelist via exp.
# Usage: ./export <worker-id>
# Coordination with the other workers is done through marker files:
#   expdone/expdone.<table>  -- table already exported
#   explock/explock.<table>  -- another worker is exporting it right now

# Echo prefixed with a timestamp, for progress logging.
dateecho()
{
    datevar=`date '+%Y/%m/%d %H:%M:%S'`
    echo "$datevar $*"
}

# One pass over tablelist: export every table that is neither done nor
# locked by another worker.  (Renamed from `export` so the shell builtin
# of the same name is not shadowed.)
export_tables()
{
    j=`wc -l tablelist | awk ' { print $1 } '`
    # Open the table list on file descriptor 3 so commands inside the loop
    # body cannot steal lines from it.  The original `exec 3` had lost the
    # redirection, leaving fd 3 unopened.
    exec 3< tablelist
    while [ $j -gt 0 ]
    do
        read <&3 tableiden
        table=`echo $tableiden | awk ' { print $1 } '`
        iden=`echo $tableiden | awk ' { print $2 } '`
        if [ -e "${PWD}/expdone/expdone.${table}" ] ; then
            SKIP=Y        # already exported
        elif [ -e "${PWD}/explock/explock.${table}" ] ; then
            SKIP=Y        # another worker owns it
        else
            SKIP=N
        fi
        if [ "$SKIP" = "N" ] ; then
            touch ${PWD}/explock/explock.${table}
            dateecho "About to start export of table $table.."
            exp user1/user1@prod1 file=${PWD}/dumpdir/exp_$table.dmp log=${PWD}/explog/exp_$table.log tables=$table > /dev/null 2>&1
            sleep 2
            touch ${PWD}/expdone/expdone.${table}
            rm -f ${PWD}/explock/explock.${table}
            dateecho "completed export of table $table.."
        fi
        SKIP=N
        j=$((j-1))
    done
    exec 3<&-   # close the list so the next pass rereads from the top
}

proc=$1
dateecho "Export process started with id $1.."
# Keep sweeping the table list until a done-marker exists for every table.
while [ 1 ]
do
    count=`wc -l tablelist | awk ' { print $1 } '`
    counttask=`ls -ltr ${PWD}/expdone | grep -v total | wc -l`
    if [ $count != $counttask ] ; then
        export_tables
    else
        dateecho "Export process $proc completed successfully.."
        exit 1
    fi
done
$ cat import
#!/bin/bash
# import -- worker script: imports each table already exported by the
# export workers.  Usage: ./import <worker-id>
# Marker files coordinate the workers:
#   expdone/expdone.<table>  -- export finished, table is ready to import
#   impdone/impdone.<table>  -- table already imported
#   implock/implock.<table>  -- another worker is importing it right now

# Echo prefixed with a timestamp, for progress logging.
dateecho()
{
    datevar=`date '+%Y/%m/%d %H:%M:%S'`
    echo "$datevar $*"
}

# One pass over tablelist: import every table whose export is done and
# which is neither imported already nor locked by another worker.
# (Renamed from `import` for symmetry with the export worker.)
import_tables()
{
    j=`wc -l tablelist | awk ' { print $1 } '`
    # Open the table list on file descriptor 3 so commands inside the loop
    # body cannot steal lines from it.  The original `exec 3` had lost the
    # redirection, leaving fd 3 unopened.
    exec 3< tablelist
    while [ $j -gt 0 ]
    do
        read <&3 tableiden
        table=`echo $tableiden | awk ' { print $1 } '`
        iden=`echo $tableiden | awk ' { print $2 } '`
        if [ -e "${PWD}/expdone/expdone.${table}" ] ; then
            if [ -e "${PWD}/impdone/impdone.${table}" ] ; then
                SKIP=Y        # already imported
            elif [ -e "${PWD}/implock/implock.${table}" ] ; then
                SKIP=Y        # another worker owns it
            else
                SKIP=N
            fi
        else
            SKIP=Y            # export not finished yet
        fi
        if [ "$SKIP" = "N" ] ; then
            touch ${PWD}/implock/implock.${table}
            dateecho "About to start import of table $table.."
            imp user2/user2@prod2 file=${PWD}/dumpdir/exp_$table.dmp log=${PWD}/implog/imp_$table.log full=y > /dev/null 2>&1
            sleep 2
            touch ${PWD}/impdone/impdone.${table}
            rm -f ${PWD}/implock/implock.${table}
            dateecho "Completed import of table $table.."
        fi
        SKIP=N
        j=$((j-1))
    done
    exec 3<&-   # close the list so the next pass rereads from the top
}

proc=$1
dateecho "Import process started with id $1.."
# Keep sweeping until a done-marker exists for every table in the list.
while [ 1 ]
do
    count=`wc -l tablelist | awk ' { print $1 } '`
    counttask=`ls -ltr ${PWD}/impdone | grep -v total | wc -l`
    if [ $count != $counttask ] ; then
        import_tables
    else
        dateecho "Import process $proc completed successfully.."
        exit 1
    fi
done
Monday, July 14, 2008
Logical Standby
Steps to create logical standby database which has similar file system structure.
1) Identify unsupported objects and tables that have no primary key or unique constraint
SELECT * FROM DBA_LOGSTDBY_UNSUPPORTED;
SELECT OWNER, TABLE_NAME, BAD_COLUMN FROM DBA_LOGSTDBY_NOT_UNIQUE;
Change unsupported column and add primary key if any rows selected from above queries respectively
2) On the primary database, set the initialization parameters below and ensure that the database is in archivelog mode.
log_archive_dest_1 = "location=/oracle/ORCLDB_P/arch"
log_archive_dest_2 = "SERVICE=ORCLDB_S LGWR"
log_archive_dest_state_1 = ENABLE
log_archive_dest_state_2 = ENABLE
log_archive_format = ARCH_%t_%r_%s.arc
Shutdown immediate
Startup mount
alter database archivelog;
alter database open;
archive log list;
3) On the standby database, set the initialization parameters below
log_archive_start = true
log_archive_dest_1 = "location=/oracle/ORCLDB_P/arch"
log_archive_dest_state_1 = ENABLE
log_archive_format = ARCH_%t_%r_%s.arc
4) Enable supplemental logging on primary key and unique index
ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE INDEX) COLUMNS;
SELECT SUPPLEMENTAL_LOG_DATA_PK, SUPPLEMENTAL_LOG_DATA_UI from V$DATABASE;
SUP SUP
--- ---
YES YES
5) Make sure log_parallelism is set to 1 (default)
SQL> show parameter LOG_PARALLELISM;
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
log_parallelism integer 1
6) Create alternate tablespace for logical standby system tables
CREATE TABLESPACE LOGMNR DATAFILE '/oracle/ORCLDB_P/oradata/logmnr.dbf' SIZE 50M;
EXECUTE DBMS_LOGMNR_D.SET_TABLESPACE ('LOGMNR');
7) On Primary perform cold backup and copy all files(datafiles,redo..) to standby site
8) Create Logical standby control file from primary
ALTER DATABASE CREATE LOGICAL STANDBY CONTROLFILE AS '/backup/log.ctl';
9) On the primary database, build the log miner dictionary
ALTER SYSTEM ENABLE RESTRICTED SESSION;
EXECUTE DBMS_LOGSTDBY.BUILD;
10) Archive log current
ALTER SYSTEM ARCHIVE LOG CURRENT;
11) Identify archived redo log that contains the log miner dictionary
SELECT NAME FROM V$ARCHIVED_LOG WHERE DICTIONARY_BEGIN='YES' AND STANDBY_DEST='NO';
12) Startup mount standby database and turn on the database guard
STARTUP MOUNT;
ALTER DATABASE GUARD ALL;
ALTER DATABASE OPEN;
13) Create tempfile on standby database
alter tablespace TEMP ADD TEMPFILE '/oracle/ORCLDB_P/oradata/temp.dbf' size 200M reuse
14) On standby database, register logfile identified in step 11
alter database register logical logfile '/oracle/ORCLDB_P/ARCH_001_8474994_00011.arc'
15) alter database to begin SQL apply on the logical standby
ALTER DATABASE START LOGICAL STANDBY APPLY INITIAL;
16) Register database with listener
ALTER SYSTEM REGISTER;
17) Check V$LOGSTDBY & DBA_LOGSTDBY_PROGRESS whether archived logs being applied if not start apply again
ALTER DATABASE START LOGICAL STANDBY APPLY ;
select SEQUENCE#, FIRST_TIME, NEXT_TIME, DICT_BEGIN, DICT_END from DBA_LOGSTDBY_LOG order by SEQUENCE#;
1) Identify unsupported objects and tables that have no primary key or unique constraint
SELECT * FROM DBA_LOGSTDBY_UNSUPPORTED;
SELECT OWNER, TABLE_NAME, BAD_COLUMN FROM DBA_LOGSTDBY_NOT_UNIQUE;
Change unsupported column and add primary key if any rows selected from above queries respectively
2) On the primary database, set the initialization parameters below and ensure that the database is in archivelog mode.
log_archive_dest_1 = "location=/oracle/ORCLDB_P/arch"
log_archive_dest_2 = "SERVICE=ORCLDB_S LGWR"
log_archive_dest_state_1 = ENABLE
log_archive_dest_state_2 = ENABLE
log_archive_format = ARCH_%t_%r_%s.arc
Shutdown immediate
Startup mount
alter database archivelog;
alter database open;
archive log list;
3) On the standby database, set the initialization parameters below
log_archive_start = true
log_archive_dest_1 = "location=/oracle/ORCLDB_P/arch"
log_archive_dest_state_1 = ENABLE
log_archive_format = ARCH_%t_%r_%s.arc
4) Enable supplemental logging on primary key and unique index
ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE INDEX) COLUMNS;
SELECT SUPPLEMENTAL_LOG_DATA_PK, SUPPLEMENTAL_LOG_DATA_UI from V$DATABASE;
SUP SUP
--- ---
YES YES
5) Make sure log_parallelism is set to 1 (default)
SQL> show parameter LOG_PARALLELISM;
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
log_parallelism integer 1
6) Create alternate tablespace for logical standby system tables
CREATE TABLESPACE LOGMNR DATAFILE '/oracle/ORCLDB_P/oradata/logmnr.dbf' SIZE 50M;
EXECUTE DBMS_LOGMNR_D.SET_TABLESPACE ('LOGMNR');
7) On Primary perform cold backup and copy all files(datafiles,redo..) to standby site
8) Create Logical standby control file from primary
ALTER DATABASE CREATE LOGICAL STANDBY CONTROLFILE AS '/backup/log.ctl';
9) On primary database and build the log miner dictionary
ALTER SYSTEM ENABLE RESTRICTED SESSION;
EXECUTE DBMS_LOGSTDBY.BUILD;
10) Archive log current
ALTER SYSTEM ARCHIVE LOG CURRENT;
11) Identify archived redo log that contains the log miner dictionary
SELECT NAME FROM V$ARCHIVED_LOG WHERE DICTIONARY_BEGIN='YES' AND STANDBY_DEST='NO';
12) Startup mount standby database and turn on the database guard
STARTUP MOUNT;
ALTER DATABASE GUARD ALL;
ALTER DATABASE OPEN;
13) Create tempfile on standby database
alter tablespace TEMP ADD TEMPFILE '/oracle/ORCLDB_P/oradata/temp.dbf' size 200M reuse
14) On standby database, register logfile identified in step 11
alter database register logical logfile '/oracle/ORCLDB_P/ARCH_001_8474994_00011.arc'
15) alter database to begin SQL apply on the logical standby
ALTER DATABASE START LOGICAL STANDBY APPLY INITIAL;
16) Register database with listener
ALTER SYSTEM REGISTER;
17) Check V$LOGSTDBY & DBA_LOGSTDBY_PROGRESS whether archived logs being applied if not start apply again
ALTER DATABASE START LOGICAL STANDBY APPLY ;
select SEQUENCE#, FIRST_TIME, NEXT_TIME, DICT_BEGIN, DICT_END from DBA_LOGSTDBY_LOG order by SEQUENCE#;
Physical Standby
Steps to create physical standby database which has similar file system structure.
1) Put the primary database in force logging mode
ALTER DATABASE FORCE LOGGING;
2) On primary database enable the below intialization parameter and ensure that database archivelog mode.
log_archive_dest_1='LOCATION=/oracle/ORCLDB_P/arch'
log_archive_dest_2='SERVICE=ORCLDB_S reopen=60'
log_archive_dest_state_1=enable
log_archive_dest_state_2=enable
log_archive_format=ARCH_%t_%r_%s.dbf
Shutdown immediate
Startup mount
alter database archivelog;
alter database open;
archive log list;
3) On Standby Database enable the below intialization parameter
fal_server=ORCLDB_P
fal_client=ORCLDB_S
log_archive_dest_1='LOCATION=/oracle/ORCLDB_P/arch'
log_archive_dest_state_1=enable
log_archive_format=ARCH_%t_%r_%s.dbf
4) Startup nomount database
startup nomount;
5) On standby use RMAN Duplicate to restore standby database from primary
rman target sys/manager@ORCLDB_P auxiliary sys/manager nocatalog
run
{
allocate auxiliary channel c1 type disk;
DUPLICATE TARGET DATABASE FOR STANDBY DORECOVER;
}
6) Put standby database in recover managed mode
alter database recover managed standby database disconnect from session;
1) Put the primary database in force logging mode
ALTER DATABASE FORCE LOGGING;
2) On primary database enable the below intialization parameter and ensure that database archivelog mode.
log_archive_dest_1='LOCATION=/oracle/ORCLDB_P/arch'
log_archive_dest_2='SERVICE=ORCLDB_S reopen=60'
log_archive_dest_state_1=enable
log_archive_dest_state_2=enable
log_archive_format=ARCH_%t_%r_%s.dbf
Shutdown immediate
Startup mount
alter database archivelog;
alter database open;
archive log list;
3) On Standby Database enable the below intialization parameter
fal_server=ORCLDB_P
fal_client=ORCLDB_S
log_archive_dest_1='LOCATION=/oracle/ORCLDB_P/arch'
log_archive_dest_state_1=enable
log_archive_format=ARCH_%t_%r_%s.dbf
4) Startup nomount database
startup nomount;
5) On standby use RMAN Duplicate to restore standby database from primary
rman target sys/manager@ORCLDB_P auxiliary sys/manager nocatalog
run
{
allocate auxiliary channel c1 type disk;
DUPLICATE TARGET DATABASE FOR STANDBY DORECOVER;
}
6) Put standby database in recover managed mode
alter database recover managed standby database disconnect from session;
Sunday, July 13, 2008
Duplicate Database
1) On source database ORCLDB1, backup database with archivelog
Connect to RMAN
rman target sys/manager nocatalog
run
{
allocate channel c1 type disk;
backup database plus archivelog
format '/oracle/backup/orcldg1_%U.bck';
release channel c1;
}
2) On target database ORCLDB2, Run below script with duplicate database command
Connect to RMAN
rman target sys/oracle@ORCLDB1 auxiliary sys/oracle nocatalog
run
{
allocate auxiliary channel c1 type disk;
set newname for datafile 1 to '/oradata/db2/SYSTEM01.DBF';
set newname for datafile 2 to '/oradata/db2/UNDOTBS01.DBF';
set newname for datafile 3 to '/oradata/db2/SYSAUX01.DBF';
set newname for datafile 4 to '/oradata/db2/USERS01.DBF';
set newname for datafile 5 to '/oradata/db2/EXAMPLE01.DBF';
set newname for datafile 6 to '/oradata/db2/STRMTAB_01.DBF';
set newname for datafile 7 to '/oradata/db2/LOGMNRTST_01.DBF';
set newname for tempfile 1 to '/oradata/db2/TEMP01.DBF';
duplicate target database to ORCLDB2
logfile
group 1 ('/oradata/db2/redo1.log') size 200M reuse,
group 2 ('/oradata/db2/redo2.log') size 200M reuse,
group 3 ('/oradata/db2/redo3.log') size 200M reuse;
}
Connect to RMAN
rman target sys/manager nocatalog
run
{
allocate channel c1 type disk;
backup database plus archivelog
format '/oracle/backup/orcldg1_%U.bck';
release channel c1;
}
2) On target database ORCLDB2, Run below script with duplicate database command
Connect to RMAN
rman target sys/oracle@ORCLDB1 auxiliary sys/oracle nocatalog
run
{
allocate auxiliary channel c1 type disk;
set newname for datafile 1 to '/oradata/db2/SYSTEM01.DBF';
set newname for datafile 2 to '/oradata/db2/UNDOTBS01.DBF';
set newname for datafile 3 to '/oradata/db2/SYSAUX01.DBF';
set newname for datafile 4 to '/oradata/db2/USERS01.DBF';
set newname for datafile 5 to '/oradata/db2/EXAMPLE01.DBF';
set newname for datafile 6 to '/oradata/db2/STRMTAB_01.DBF';
set newname for datafile 7 to '/oradata/db2/LOGMNRTST_01.DBF';
set newname for tempfile 1 to '/oradata/db2/TEMP01.DBF';
duplicate target database to ORCLDB2
logfile
group 1 ('/oradata/db2/redo1.log') size 200M reuse,
group 2 ('/oradata/db2/redo2.log') size 200M reuse,
group 3 ('/oradata/db2/redo3.log') size 200M reuse;
}
Refreshing Database
1) Backup up source database
rman target sys/manager nocatalog
run
{
allocate channel c1 type disk;
backup database format '\backup\db1_%U.bck';
release channel c1;
}
2) Backup control file on source database
ALTER DATABASE BACKUP CONTROLFILE TO '/backup/control.bck';
3) Copy all required archived logs and backup files along with control file backup to target database archive and backup directory respectively.
4) Shutdown target database
5) Copy backup control file to target control file location specified in parameter file
6) Change the db_name and archive log format of target database parameter file similar to source.
7) Startup mount target database
8) Connect to RMAN on target database and restore database (Use set newname to specify new file system path)
rman target sys/manager nocatalog
run
{
allocate channel c1 type disk;
set newname for datafile 1 to '/oradata/db2/SYSTEM01.DBF';
set newname for datafile 2 to '/oradata/db2/UNDOTBS01.DBF';
set newname for datafile 3 to '/oradata/db2/SYSAUX01.DBF';
set newname for datafile 4 to '/oradata/db2/USERS01.DBF';
set newname for datafile 5 to '/oradata/db2/EXAMPLE01.DBF';
set newname for datafile 6 to '/oradata/db2/STRMTAB_01.DBF';
set newname for datafile 7 to '/oradata/db2/LOGMNRTST_01.DBF';
restore database;
switch datafile all;
release channel c1;
}
9) Recover database
recover database using backup controlfile until cancel;
10) Take control file trace of target database
alter database backup controlfile to trace;
11) Edit the trace control file which is in udump directory with target database name and set database option
12) Revert back the parameter of target database with old dbname and archive log format
13) Shutdown immediate target database
14) Start nomount target database
15) Recreate the control file which has been edited in step 11
16) Open database with resetlogs option
alter database open resetlogs;
17) Add tempfile with reuse option and rename global database name
rman target sys/manager nocatalog
run
{
allocate channel c1 type disk;
backup database format '\backup\db1_%U.bck';
release channel c1;
}
2) Backup control file on source database
ALTER DATABASE BACKUP CONTROLFILE TO '/backup/control.bck';
3) Copy all required archived logs and backup files along with control file backup to target database archive and backup directory respectively.
4) Shutdown target database
5) Copy backup control file to target control file location specified in parameter file
6) Change the db_name and archive log format of target database parameter file similar to source.
7) Startup mount target database
8) Connect to RMAN on target database and restore database (Use set newname to specify new file system path)
rman target sys/manager nocatalog
run
{
allocate channel c1 type disk;
set newname for datafile 1 to '/oradata/db2/SYSTEM01.DBF';
set newname for datafile 2 to '/oradata/db2/UNDOTBS01.DBF';
set newname for datafile 3 to '/oradata/db2/SYSAUX01.DBF';
set newname for datafile 4 to '/oradata/db2/USERS01.DBF';
set newname for datafile 5 to '/oradata/db2/EXAMPLE01.DBF';
set newname for datafile 6 to '/oradata/db2/STRMTAB_01.DBF';
set newname for datafile 7 to '/oradata/db2/LOGMNRTST_01.DBF';
restore database;
switch datafile all;
release channel c1;
}
9) Recover database
recover database using backup controlfile until cancel;
10) Take control file trace of target database
alter database backup controlfile to trace;
11) Edit the trace control file which is in udump directory with target database name and set database option
12) Revert back the parameter of target database with old dbname and archive log format
13) Shutdown immediate target database
14) Start nomount target database
15) Recreate the control file which has been edited in step 11
16) Open database with resetlogs option
alter database open resetlogs;
17) Add tempfile with reuse option and rename global database name
Friday, July 11, 2008
Shell Scripts
Connect to database using Shell script
checkdb.ksh
#Usage: checkdb.ksh ORCLDB
# Connects to the database whose TNS alias is given in $1 and prints its
# name -- a quick "is the instance reachable" sanity check.
echo $1
# sqlplus connect syntax is user/password@alias; the original had the
# password after the alias (system@$1/manager), which does not parse as
# the intended credentials.
sqlplus system/manager@$1 << !
set timing on
set pagesize 100
select name from v\$database;
exit;
!
Steps for splitting and importing
When an OS file-size limit is set, the dump file can be split into smaller chunks
using the split command and imported into the database through a pipe.
$ split -b2000m devdb.dmp &
$ ls -ltr
total 8393333
-rw-r--r-- 1 hp nobody 1104011840 Feb 22 05:12 xaf
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 05:10 xae
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 05:05 xad
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 05:00 xac
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 04:58 xab
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 04:57 xaa
$ mknod imp_pipe p
$ ls
imp_pipe xaa xab xac xad xae xaf
$ cat xaa xab xac xad xae xaf > imp_pipe &
$imp system/manager@stagedb ignore=y file=imp_pipe log=imp.log fromuser=devuser
touser=stageuser
checkdb.ksh
#Usage: checkdb.ksh ORCLDB
# Connects to the database whose TNS alias is given in $1 and prints its
# name -- a quick "is the instance reachable" sanity check.
echo $1
# sqlplus connect syntax is user/password@alias; the original had the
# password after the alias (system@$1/manager), which does not parse as
# the intended credentials.
sqlplus system/manager@$1 << !
set timing on
set pagesize 100
select name from v\$database;
exit;
!
Steps for splitting and importing
When an OS file-size limit is set, the dump file can be split into smaller chunks
using the split command and imported into the database through a pipe.
$ split -b2000m devdb.dmp &
$ ls -ltr
total 8393333
-rw-r--r-- 1 hp nobody 1104011840 Feb 22 05:12 xaf
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 05:10 xae
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 05:05 xad
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 05:00 xac
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 04:58 xab
-rw-r--r-- 1 hp nobody 2097152000 Feb 22 04:57 xaa
$ mknod imp_pipe p
$ ls
imp_pipe xaa xab xac xad xae xaf
$ cat xaa xab xac xad xae xaf > imp_pipe &
$imp system/manager@stagedb ignore=y file=imp_pipe log=imp.log fromuser=devuser
touser=stageuser
Sunday, July 6, 2008
Oracle Streams Schema Replication
The below procedure sets one way replication from source to target database
Initialization Parameters For Streams
Set the below intialization paramters
COMPATIBLE =10.1.0 or higher.
GLOBAL_NAMES = TRUE
JOB_QUEUE_PROCESSES= 4 or higher.
PARALLEL_MAX_SERVERS= 2 or higher.
SHARED_POOL_SIZE= 200 MB
OPEN_LINKS= 4 or higher.
TIMED_STATISTICS= true.
LOG_ARCHIVE_DEST_n= set atleast one arch destination, where n is 1, 2, 3, ... 10.
LOG_ARCHIVE_DEST_STATE_n= set to enable.
UNDO_RETENTION >900
_job_queue_interval=1
Create the streams administrator and grant privileges on both Source and Target
create user STRMADMIN identified by STRMADMIN;
ALTER USER STRMADMIN DEFAULT TABLESPACE STRMTAB;
ALTER USER STRMADMIN QUOTA UNLIMITED ON STRMTAB;
GRANT CONNECT, RESOURCE, AQ_ADMINISTRATOR_ROLE to STRMADMIN;
GRANT SELECT ANY DICTIONARY TO STRMADMIN;
GRANT EXECUTE ON DBMS_AQ TO STRMADMIN;
GRANT EXECUTE ON DBMS_AQADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_FLASHBACK TO STRMADMIN;
GRANT EXECUTE ON DBMS_STREAMS_ADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_CAPTURE_ADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_APPLY_ADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_RULE_ADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_PROPAGATION_ADM TO STRMADMIN;
GRANT DBA to STRMADMIN;
execute DBMS_STREAMS_AUTH.GRANT_ADMIN_PRIVILEGE('STRMADMIN');
Create streams queue
connect STRMADMIN/STRMADMIN
BEGIN
DBMS_STREAMS_ADM.SET_UP_QUEUE(
queue_table => 'STREAMS_QUEUE_TABLE',
queue_name => 'STREAMS_QUEUE',
queue_user => 'STRMADMIN');
END;
/
On Target database ORCDDB2
Apply rules for the Schema at the destination database :
BEGIN
DBMS_STREAMS_ADM.ADD_SCHEMA_RULES(
schema_name => 'SCOTT',
streams_type => 'APPLY ',
streams_name => 'STRMADMIN_APPLY',
queue_name => 'STRMADMIN.STREAMS_QUEUE',
include_dml => true,
include_ddl => true,
source_database => 'ORCLDB1');
END;
/
Specify an 'APPLY USER' at the destination database
User which needs to apply all DML statements and DDL statements.
The user specified in the APPLY_USER parameter must have the necessary
privileges to perform DML and DDL changes on the apply objects.
BEGIN
DBMS_APPLY_ADM.ALTER_APPLY(
apply_name => 'STRMADMIN_APPLY',
apply_user => 'SCOTT');
END;
/
Start the Apply process
DECLARE
v_started number;
BEGIN
SELECT decode(status, 'ENABLED', 1, 0) INTO v_started
FROM DBA_APPLY WHERE APPLY_NAME = 'STRMADMIN_APPLY';
if (v_started = 0) then
DBMS_APPLY_ADM.START_APPLY(apply_name => 'STRMADMIN_APPLY');
end if;
END;
/
Steps to be carried out at the Source Database ORCLDB1
Move LogMiner tables from SYSTEM tablespace:
By default, all LogMiner tables are created in the SYSTEM tablespace.
It is a good practice to create an alternate tablespace for the LogMiner
tables.
CREATE TABLESPACE LOGMNRTS DATAFILE 'logmnrts.dbf' SIZE 25M AUTOEXTEND ON
MAXSIZE UNLIMITED;
BEGIN
DBMS_LOGMNR_D.SET_TABLESPACE('LOGMNRTS');
END;
/
Turn on supplemental logging at database level
connect SYS/password as SYSDBA
ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE INDEX) COLUMNS;
Create a database link to the source and destination database
connect STRMADMIN/STRMADMIN
CREATE DATABASE LINK ORCLDB1 connect to
STRMADMIN identified by STRMADMIN using 'ORCLDB1';
Test the database link to be working properly by querying against the
destination database.
Eg : select * from global_name@ORCLDB1;
connect STRMADMIN/STRMADMIN
CREATE DATABASE LINK ORCLDB2 connect to
STRMADMIN identified by STRMADMIN using 'ORCLDB2';
Test the database link to be working properly by querying against the
destination database.
Eg : select * from global_name@ORCLDB2;
Add capture rules for the schema SCOTT at the source database
BEGIN
DBMS_STREAMS_ADM.ADD_SCHEMA_RULES(
schema_name => 'SCOTT',
streams_type => 'CAPTURE',
streams_name => 'STREAM_CAPTURE',
queue_name => 'STRMADMIN.STREAMS_QUEUE',
include_dml => true,
include_ddl => true,
source_database => 'ORCLDB1');
END;
/
Add propagation rules for the schema SCOTT at the source database
This step will also create a propagation job to the destination database.
BEGIN
DBMS_STREAMS_ADM.ADD_SCHEMA_PROPAGATION_RULES(
schema_name => 'SCOTT',
streams_name => 'STREAM_PROPAGATE',
source_queue_name => 'STRMADMIN.STREAMS_QUEUE',
destination_queue_name => 'STRMADMIN.STREAMS_QUEUE@ORCLDB2',
include_dml => true,
include_ddl => true,
source_database => 'ORCLDB1');
END;
/
Export, import and instantiation of tables from Source to Destination Database
exp USERID=SYSTEM/manager@ORCLDB1 OWNER=SCOTT FILE=scott.dmp LOG=exportTables.log OBJECT_CONSISTENT=Y STATISTICS = NONE
imp USERID=SYSTEM@ORCLDB2 FILE=scott.dmp LOG=importTables.log FULL=Y CONSTRAINTS=Y IGNORE=Y COMMIT=Y STREAMS_INSTANTIATION=Y
Start the Capture process on source database ORCLDB1
begin
DBMS_CAPTURE_ADM.START_CAPTURE(capture_name => 'STREAM_CAPTURE');
end;
/
The setup is now ready to replicate data between the two databases using
Oracle Streams.
Removing a Streams Configuration
To Remove streams configuration use DBMS_STREAMS_ADM.REMOVE_STREAMS_CONFIGURATION and drop Streams user,
CONN SYS/MANAGER AS SYSDBA
EXEC DBMS_STREAMS_ADM.REMOVE_STREAMS_CONFIGURATION();
DROP USER STRMADMIN CASCADE;
SQL Queries for Monitoring Streams
Determining Capture to Dequeue Latency for a Message
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A17
COLUMN LATENCY HEADING 'Latency|in|Seconds' FORMAT 9999
COLUMN CREATION HEADING 'Message Creation' FORMAT A17
COLUMN LAST_DEQUEUE HEADING 'Last Dequeue Time' FORMAT A20
COLUMN DEQUEUED_MESSAGE_NUMBER HEADING 'Dequeued|Message Number' FORMAT 999999
SELECT APPLY_NAME,
(DEQUEUE_TIME-DEQUEUED_MESSAGE_CREATE_TIME)*86400 LATENCY,
TO_CHAR(DEQUEUED_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY') CREATION,
TO_CHAR(DEQUEUE_TIME,'HH24:MI:SS MM/DD/YY') LAST_DEQUEUE,
DEQUEUED_MESSAGE_NUMBER
FROM V$STREAMS_APPLY_READER;
General information about each apply process in a database
COLUMN APPLY_NAME HEADING 'Apply|Process|Name' FORMAT A15
COLUMN QUEUE_NAME HEADING 'Apply|Process|Queue' FORMAT A15
COLUMN RULE_SET_NAME HEADING 'Positive|Rule Set' FORMAT A15
COLUMN NEGATIVE_RULE_SET_NAME HEADING 'Negative|Rule Set' FORMAT A15
COLUMN STATUS HEADING 'Apply|Process|Status' FORMAT A15
SELECT APPLY_NAME,
QUEUE_NAME,
RULE_SET_NAME,
NEGATIVE_RULE_SET_NAME,
STATUS
FROM DBA_APPLY;
COLUMN APPLY_NAME HEADING 'Apply Process Name' FORMAT A20
COLUMN APPLY_CAPTURED HEADING 'Type of Messages Applied' FORMAT A25
COLUMN APPLY_USER HEADING 'Apply User' FORMAT A30
SELECT APPLY_NAME,
DECODE(APPLY_CAPTURED,
'YES', 'Captured',
'NO', 'User-Enqueued') APPLY_CAPTURED,
APPLY_USER
FROM DBA_APPLY;
List Parameter Settings for Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A15
COLUMN PARAMETER HEADING 'Parameter' FORMAT A25
COLUMN VALUE HEADING 'Value' FORMAT A20
COLUMN SET_BY_USER HEADING 'Set by User?' FORMAT A15
SELECT APPLY_NAME,
PARAMETER,
VALUE,
SET_BY_USER
FROM DBA_APPLY_PARAMETERS;
Displays All of the Error Handlers for Local Apply Processes
COLUMN OBJECT_OWNER HEADING 'Table|Owner' FORMAT A5
COLUMN OBJECT_NAME HEADING 'Table Name' FORMAT A10
COLUMN OPERATION_NAME HEADING 'Operation' FORMAT A10
COLUMN USER_PROCEDURE HEADING 'Handler Procedure' FORMAT A30
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A15
SELECT OBJECT_OWNER,
OBJECT_NAME,
OPERATION_NAME,
USER_PROCEDURE,
APPLY_NAME
FROM DBA_APPLY_DML_HANDLERS
WHERE ERROR_HANDLER = 'Y'
ORDER BY OBJECT_OWNER, OBJECT_NAME;
Displays the Message Handler for Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Process Name' FORMAT A20
COLUMN MESSAGE_HANDLER HEADING 'Message Handler' FORMAT A20
SELECT APPLY_NAME, MESSAGE_HANDLER FROM DBA_APPLY
WHERE MESSAGE_HANDLER IS NOT NULL;
Displays Precommit Handler for Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Process Name' FORMAT A20
COLUMN PRECOMMIT_HANDLER HEADING 'Precommit Handler' FORMAT A30
COLUMN APPLY_CAPTURED HEADING 'Type of|Messages|Applied' FORMAT A15
SELECT APPLY_NAME,
PRECOMMIT_HANDLER,
DECODE(APPLY_CAPTURED,
'YES', 'Captured',
'NO', 'User-Enqueued') APPLY_CAPTURED
FROM DBA_APPLY
WHERE PRECOMMIT_HANDLER IS NOT NULL;
Displays Information About the Reader Server for Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A15
COLUMN APPLY_CAPTURED HEADING 'Apply Type' FORMAT A22
COLUMN PROCESS_NAME HEADING 'Process|Name' FORMAT A7
COLUMN STATE HEADING 'State' FORMAT A17
COLUMN TOTAL_MESSAGES_DEQUEUED HEADING 'Total Messages|Dequeued' FORMAT 99999999
SELECT r.APPLY_NAME,
DECODE(ap.APPLY_CAPTURED,
'YES','Captured LCRS',
'NO','User-enqueued messages','UNKNOWN') APPLY_CAPTURED,
SUBSTR(s.PROGRAM,INSTR(s.PROGRAM,'(')+1,4) PROCESS_NAME,
r.STATE,
r.TOTAL_MESSAGES_DEQUEUED
FROM V$STREAMS_APPLY_READER r, V$SESSION s, DBA_APPLY ap
WHERE r.SID = s.SID AND
r.SERIAL# = s.SERIAL# AND
r.APPLY_NAME = ap.APPLY_NAME;
Monitoring Transactions and Messages Spilled by Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Name' FORMAT A20
COLUMN 'Transaction ID' HEADING 'Transaction ID' FORMAT A15
COLUMN FIRST_SCN HEADING 'First SCN' FORMAT 99999999
COLUMN MESSAGE_COUNT HEADING 'Message Count' FORMAT 99999999
SELECT APPLY_NAME,
XIDUSN ||'.'||
XIDSLT ||'.'||
XIDSQN "Transaction ID",
FIRST_SCN,
MESSAGE_COUNT
FROM DBA_APPLY_SPILL_TXN;
COLUMN APPLY_NAME HEADING 'Apply Name' FORMAT A15
COLUMN TOTAL_MESSAGES_SPILLED HEADING 'Total|Spilled Messages' FORMAT 99999999
COLUMN ELAPSED_SPILL_TIME HEADING 'Elapsed Time|Spilling Messages' FORMAT 99999999.99
SELECT APPLY_NAME,
TOTAL_MESSAGES_SPILLED,
(ELAPSED_SPILL_TIME/100) ELAPSED_SPILL_TIME
FROM V$STREAMS_APPLY_READER;
Displaying General Information About Each Coordinator Process
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A17
COLUMN LATENCY HEADING 'Latency|in|Seconds' FORMAT 9999
COLUMN CREATION HEADING 'Message Creation' FORMAT A17
COLUMN LAST_DEQUEUE HEADING 'Last Dequeue Time' FORMAT A20
COLUMN DEQUEUED_MESSAGE_NUMBER HEADING 'Dequeued|Message Number' FORMAT 999999
SELECT APPLY_NAME,
(DEQUEUE_TIME-DEQUEUED_MESSAGE_CREATE_TIME)*86400 LATENCY,
TO_CHAR(DEQUEUED_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY') CREATION,
TO_CHAR(DEQUEUE_TIME,'HH24:MI:SS MM/DD/YY') LAST_DEQUEUE,
DEQUEUED_MESSAGE_NUMBER
FROM V$STREAMS_APPLY_READER;
Displaying Information About Transactions Received and Applied
COLUMN APPLY_NAME HEADING 'Apply Process Name' FORMAT A25
COLUMN TOTAL_RECEIVED HEADING 'Total|Trans|Received' FORMAT 99999999
COLUMN TOTAL_APPLIED HEADING 'Total|Trans|Applied' FORMAT 99999999
COLUMN TOTAL_ERRORS HEADING 'Total|Apply|Errors' FORMAT 9999
COLUMN BEING_APPLIED HEADING 'Total|Trans Being|Applied' FORMAT 99999999
COLUMN TOTAL_IGNORED HEADING 'Total|Trans|Ignored' FORMAT 99999999
SELECT APPLY_NAME,
TOTAL_RECEIVED,
TOTAL_APPLIED,
TOTAL_ERRORS,
(TOTAL_ASSIGNED - (TOTAL_ROLLBACKS + TOTAL_APPLIED)) BEING_APPLIED,
TOTAL_IGNORED
FROM V$STREAMS_APPLY_COORDINATOR;
V$STREAMS_APPLY_COORDINATOR Query for Latency
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A17
COLUMN 'Latency in Seconds' FORMAT 999999
COLUMN 'Message Creation' FORMAT A17
COLUMN 'Apply Time' FORMAT A17
COLUMN HWM_MESSAGE_NUMBER HEADING 'Applied|Message|Number' FORMAT 999999
SELECT APPLY_NAME,
(HWM_TIME-HWM_MESSAGE_CREATE_TIME)*86400 "Latency in Seconds",
TO_CHAR(HWM_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY')
"Message Creation",
TO_CHAR(HWM_TIME,'HH24:MI:SS MM/DD/YY') "Apply Time",
HWM_MESSAGE_NUMBER
FROM V$STREAMS_APPLY_COORDINATOR;
DBA_APPLY_PROGRESS Query for Latency
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A17
COLUMN 'Latency in Seconds' FORMAT 999999
COLUMN 'Message Creation' FORMAT A17
COLUMN 'Apply Time' FORMAT A17
COLUMN APPLIED_MESSAGE_NUMBER HEADING 'Applied|Message|Number' FORMAT 999999
SELECT APPLY_NAME,
(APPLY_TIME-APPLIED_MESSAGE_CREATE_TIME)*86400 "Latency in Seconds",
TO_CHAR(APPLIED_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY')
"Message Creation",
TO_CHAR(APPLY_TIME,'HH24:MI:SS MM/DD/YY') "Apply Time",
APPLIED_MESSAGE_NUMBER
FROM DBA_APPLY_PROGRESS;
Checking for Apply Errors
COLUMN APPLY_NAME HEADING 'Apply|Process|Name' FORMAT A10
COLUMN SOURCE_DATABASE HEADING 'Source|Database' FORMAT A10
COLUMN LOCAL_TRANSACTION_ID HEADING 'Local|Transaction|ID' FORMAT A11
COLUMN ERROR_NUMBER HEADING 'Error Number' FORMAT 99999999
COLUMN ERROR_MESSAGE HEADING 'Error Message' FORMAT A20
COLUMN MESSAGE_COUNT HEADING 'Messages in|Error|Transaction' FORMAT 99999999
SELECT APPLY_NAME,
SOURCE_DATABASE,
LOCAL_TRANSACTION_ID,
ERROR_NUMBER,
ERROR_MESSAGE,
MESSAGE_COUNT
FROM DBA_APPLY_ERROR;
Initialization Parameters For Streams
Set the initialization parameters below
COMPATIBLE =10.1.0 or higher.
GLOBAL_NAMES = TRUE
JOB_QUEUE_PROCESSES= 4 or higher.
PARALLEL_MAX_SERVERS= 2 or higher.
SHARED_POOL_SIZE= 200 MB
OPEN_LINKS= 4 or higher.
TIMED_STATISTICS= true.
LOG_ARCHIVE_DEST_n= set at least one archive destination, where n is 1, 2, 3, ... 10.
LOG_ARCHIVE_DEST_STATE_n= set to enable.
UNDO_RETENTION >900
_job_queue_interval=1
Create the Streams administrator and grant privileges on both Source and Target
-- Create the Streams administrator account and grant the privileges it needs.
-- Run on BOTH the source and target databases.
create user STRMADMIN identified by STRMADMIN;
-- Fix: original said "TABLESACE" (typo), which fails with ORA-00905.
ALTER USER STRMADMIN DEFAULT TABLESPACE STRMTAB;
ALTER USER STRMADMIN QUOTA UNLIMITED ON STRMTAB;
GRANT CONNECT, RESOURCE, AQ_ADMINISTRATOR_ROLE to STRMADMIN;
GRANT SELECT ANY DICTIONARY TO STRMADMIN;
-- Direct EXECUTE grants on the queueing / Streams administration packages.
GRANT EXECUTE ON DBMS_AQ TO STRMADMIN;
GRANT EXECUTE ON DBMS_AQADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_FLASHBACK TO STRMADMIN;
GRANT EXECUTE ON DBMS_STREAMS_ADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_CAPTURE_ADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_APPLY_ADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_RULE_ADM TO STRMADMIN;
GRANT EXECUTE ON DBMS_PROPAGATION_ADM TO STRMADMIN;
-- NOTE(review): DBA subsumes most of the grants above; kept as in the
-- original procedure so existing setups are unchanged.
GRANT DBA to STRMADMIN;
execute DBMS_STREAMS_AUTH.GRANT_ADMIN_PRIVILEGE('STRMADMIN');
Create streams queue
connect STRMADMIN/STRMADMIN
BEGIN
DBMS_STREAMS_ADM.SET_UP_QUEUE(
queue_table => 'STREAMS_QUEUE_TABLE',
queue_name => 'STREAMS_QUEUE',
queue_user => 'STRMADMIN');
END;
/
On Target database ORCLDB2
Apply rules for the Schema at the destination database :
-- Create the apply-side rules for the SCOTT schema on the destination
-- database, applying both DML and DDL captured at ORCLDB1.
BEGIN
DBMS_STREAMS_ADM.ADD_SCHEMA_RULES(
schema_name => 'SCOTT',
-- Fix: original passed 'APPLY ' with a trailing space, which is not a
-- valid streams_type value.
streams_type => 'APPLY',
streams_name => 'STRMADMIN_APPLY',
queue_name => 'STRMADMIN.STREAMS_QUEUE',
include_dml => true,
include_ddl => true,
source_database => 'ORCLDB1');
END;
/
Specify an 'APPLY USER' at the destination database
User which needs to apply all DML statements and DDL statements.
The user specified in the APPLY_USER parameter must have the necessary
privileges to perform DML and DDL changes on the apply objects.
BEGIN
DBMS_APPLY_ADM.ALTER_APPLY(
apply_name => 'STRMADMIN_APPLY',
apply_user => 'SCOTT');
END;
/
Start the Apply process
DECLARE
v_started number;
BEGIN
SELECT decode(status, 'ENABLED', 1, 0) INTO v_started
FROM DBA_APPLY WHERE APPLY_NAME = 'STRMADMIN_APPLY';
if (v_started = 0) then
DBMS_APPLY_ADM.START_APPLY(apply_name => 'STRMADMIN_APPLY');
end if;
END;
/
Steps to be carried out at the Source Database ORCLDB1
Move LogMiner tables from SYSTEM tablespace:
By default, all LogMiner tables are created in the SYSTEM tablespace.
It is a good practice to create an alternate tablespace for the LogMiner
tables.
CREATE TABLESPACE LOGMNRTS DATAFILE 'logmnrts.dbf' SIZE 25M AUTOEXTEND ON
MAXSIZE UNLIMITED;
BEGIN
DBMS_LOGMNR_D.SET_TABLESPACE('LOGMNRTS');
END;
/
Turn on supplemental logging at database level
connect SYS/password as SYSDBA
ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE INDEX) COLUMNS;
Create a database link to the source and destination database
connect STRMADMIN/STRMADMIN
CREATE DATABASE LINK ORCLDB1 connect to
STRMADMIN identified by STRMADMIN using 'ORCLDB1';
Test the database link to be working properly by querying against the
destination database.
Eg : select * from global_name@ORCLDB1;
connect STRMADMIN/STRMADMIN
CREATE DATABASE LINK ORCLDB2 connect to
STRMADMIN identified by STRMADMIN using 'ORCLDB2';
Test the database link to be working properly by querying against the
destination database.
Eg : select * from global_name@ORCLDB2;
Add capture rules for the schema SCOTT at the source database
BEGIN
DBMS_STREAMS_ADM.ADD_SCHEMA_RULES(
schema_name => 'SCOTT',
streams_type => 'CAPTURE',
streams_name => 'STREAM_CAPTURE',
queue_name => 'STRMADMIN.STREAMS_QUEUE',
include_dml => true,
include_ddl => true,
source_database => 'ORCLDB1');
END;
/
Add propagation rules for the schema SCOTT at the source database
This step will also create a propagation job to the destination database.
BEGIN
DBMS_STREAMS_ADM.ADD_SCHEMA_PROPAGATION_RULES(
schema_name => 'SCOTT',
streams_name => 'STREAM_PROPAGATE',
source_queue_name => 'STRMADMIN.STREAMS_QUEUE',
destination_queue_name => 'STRMADMIN.STREAMS_QUEUE@ORCLDB2',
include_dml => true,
include_ddl => true,
source_database => 'ORCLDB1');
END;
/
Export, import and instantiation of tables from Source to Destination Database
exp USERID=SYSTEM/manager@ORCLDB1 OWNER=SCOTT FILE=scott.dmp LOG=exportTables.log OBJECT_CONSISTENT=Y STATISTICS = NONE
imp USERID=SYSTEM@ORCLDB2 FILE=scott.dmp LOG=importTables.log FULL=Y CONSTRAINTS=Y IGNORE=Y COMMIT=Y STREAMS_INSTANTIATION=Y
Start the Capture process on source database ORCLDB1
begin
DBMS_CAPTURE_ADM.START_CAPTURE(capture_name => 'STREAM_CAPTURE');
end;
/
The setup is now ready to replicate data between the two databases using
Oracle Streams.
Removing a Streams Configuration
To Remove streams configuration use DBMS_STREAMS_ADM.REMOVE_STREAMS_CONFIGURATION and drop Streams user,
CONN SYS/MANAGER AS SYSDBA
EXEC DBMS_STREAMS_ADM.REMOVE_STREAMS_CONFIGURATION();
DROP USER STRMADMIN CASCADE;
SQL Queries for Monitoring Streams
Determining Capture to Dequeue Latency for a Message
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A17
COLUMN LATENCY HEADING 'Latency|in|Seconds' FORMAT 9999
COLUMN CREATION HEADING 'Message Creation' FORMAT A17
COLUMN LAST_DEQUEUE HEADING 'Last Dequeue Time' FORMAT A20
COLUMN DEQUEUED_MESSAGE_NUMBER HEADING 'Dequeued|Message Number' FORMAT 999999
SELECT APPLY_NAME,
(DEQUEUE_TIME-DEQUEUED_MESSAGE_CREATE_TIME)*86400 LATENCY,
TO_CHAR(DEQUEUED_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY') CREATION,
TO_CHAR(DEQUEUE_TIME,'HH24:MI:SS MM/DD/YY') LAST_DEQUEUE,
DEQUEUED_MESSAGE_NUMBER
FROM V$STREAMS_APPLY_READER;
General information about each apply process in a database
COLUMN APPLY_NAME HEADING 'Apply|Process|Name' FORMAT A15
COLUMN QUEUE_NAME HEADING 'Apply|Process|Queue' FORMAT A15
COLUMN RULE_SET_NAME HEADING 'Positive|Rule Set' FORMAT A15
COLUMN NEGATIVE_RULE_SET_NAME HEADING 'Negative|Rule Set' FORMAT A15
COLUMN STATUS HEADING 'Apply|Process|Status' FORMAT A15
SELECT APPLY_NAME,
QUEUE_NAME,
RULE_SET_NAME,
NEGATIVE_RULE_SET_NAME,
STATUS
FROM DBA_APPLY;
COLUMN APPLY_NAME HEADING 'Apply Process Name' FORMAT A20
COLUMN APPLY_CAPTURED HEADING 'Type of Messages Applied' FORMAT A25
COLUMN APPLY_USER HEADING 'Apply User' FORMAT A30
SELECT APPLY_NAME,
DECODE(APPLY_CAPTURED,
'YES', 'Captured',
'NO', 'User-Enqueued') APPLY_CAPTURED,
APPLY_USER
FROM DBA_APPLY;
List Parameter Settings for Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A15
COLUMN PARAMETER HEADING 'Parameter' FORMAT A25
COLUMN VALUE HEADING 'Value' FORMAT A20
COLUMN SET_BY_USER HEADING 'Set by User?' FORMAT A15
SELECT APPLY_NAME,
PARAMETER,
VALUE,
SET_BY_USER
FROM DBA_APPLY_PARAMETERS;
Displays All of the Error Handlers for Local Apply Processes
COLUMN OBJECT_OWNER HEADING 'Table|Owner' FORMAT A5
COLUMN OBJECT_NAME HEADING 'Table Name' FORMAT A10
COLUMN OPERATION_NAME HEADING 'Operation' FORMAT A10
COLUMN USER_PROCEDURE HEADING 'Handler Procedure' FORMAT A30
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A15
SELECT OBJECT_OWNER,
OBJECT_NAME,
OPERATION_NAME,
USER_PROCEDURE,
APPLY_NAME
FROM DBA_APPLY_DML_HANDLERS
WHERE ERROR_HANDLER = 'Y'
ORDER BY OBJECT_OWNER, OBJECT_NAME;
Displays the Message Handler for Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Process Name' FORMAT A20
COLUMN MESSAGE_HANDLER HEADING 'Message Handler' FORMAT A20
SELECT APPLY_NAME, MESSAGE_HANDLER FROM DBA_APPLY
WHERE MESSAGE_HANDLER IS NOT NULL;
Displays Precommit Handler for Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Process Name' FORMAT A20
COLUMN PRECOMMIT_HANDLER HEADING 'Precommit Handler' FORMAT A30
COLUMN APPLY_CAPTURED HEADING 'Type of|Messages|Applied' FORMAT A15
SELECT APPLY_NAME,
PRECOMMIT_HANDLER,
DECODE(APPLY_CAPTURED,
'YES', 'Captured',
'NO', 'User-Enqueued') APPLY_CAPTURED
FROM DBA_APPLY
WHERE PRECOMMIT_HANDLER IS NOT NULL;
Displays Information About the Reader Server for Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A15
COLUMN APPLY_CAPTURED HEADING 'Apply Type' FORMAT A22
COLUMN PROCESS_NAME HEADING 'Process|Name' FORMAT A7
COLUMN STATE HEADING 'State' FORMAT A17
COLUMN TOTAL_MESSAGES_DEQUEUED HEADING 'Total Messages|Dequeued' FORMAT 99999999
SELECT r.APPLY_NAME,
DECODE(ap.APPLY_CAPTURED,
'YES','Captured LCRS',
'NO','User-enqueued messages','UNKNOWN') APPLY_CAPTURED,
SUBSTR(s.PROGRAM,INSTR(s.PROGRAM,'(')+1,4) PROCESS_NAME,
r.STATE,
r.TOTAL_MESSAGES_DEQUEUED
FROM V$STREAMS_APPLY_READER r, V$SESSION s, DBA_APPLY ap
WHERE r.SID = s.SID AND
r.SERIAL# = s.SERIAL# AND
r.APPLY_NAME = ap.APPLY_NAME;
Monitoring Transactions and Messages Spilled by Each Apply Process
COLUMN APPLY_NAME HEADING 'Apply Name' FORMAT A20
COLUMN 'Transaction ID' HEADING 'Transaction ID' FORMAT A15
COLUMN FIRST_SCN HEADING 'First SCN' FORMAT 99999999
COLUMN MESSAGE_COUNT HEADING 'Message Count' FORMAT 99999999
SELECT APPLY_NAME,
XIDUSN ||'.'||
XIDSLT ||'.'||
XIDSQN "Transaction ID",
FIRST_SCN,
MESSAGE_COUNT
FROM DBA_APPLY_SPILL_TXN;
COLUMN APPLY_NAME HEADING 'Apply Name' FORMAT A15
COLUMN TOTAL_MESSAGES_SPILLED HEADING 'Total|Spilled Messages' FORMAT 99999999
COLUMN ELAPSED_SPILL_TIME HEADING 'Elapsed Time|Spilling Messages' FORMAT 99999999.99
SELECT APPLY_NAME,
TOTAL_MESSAGES_SPILLED,
(ELAPSED_SPILL_TIME/100) ELAPSED_SPILL_TIME
FROM V$STREAMS_APPLY_READER;
Displaying General Information About Each Coordinator Process
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A17
COLUMN LATENCY HEADING 'Latency|in|Seconds' FORMAT 9999
COLUMN CREATION HEADING 'Message Creation' FORMAT A17
COLUMN LAST_DEQUEUE HEADING 'Last Dequeue Time' FORMAT A20
COLUMN DEQUEUED_MESSAGE_NUMBER HEADING 'Dequeued|Message Number' FORMAT 999999
SELECT APPLY_NAME,
(DEQUEUE_TIME-DEQUEUED_MESSAGE_CREATE_TIME)*86400 LATENCY,
TO_CHAR(DEQUEUED_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY') CREATION,
TO_CHAR(DEQUEUE_TIME,'HH24:MI:SS MM/DD/YY') LAST_DEQUEUE,
DEQUEUED_MESSAGE_NUMBER
FROM V$STREAMS_APPLY_READER;
Displaying Information About Transactions Received and Applied
COLUMN APPLY_NAME HEADING 'Apply Process Name' FORMAT A25
COLUMN TOTAL_RECEIVED HEADING 'Total|Trans|Received' FORMAT 99999999
COLUMN TOTAL_APPLIED HEADING 'Total|Trans|Applied' FORMAT 99999999
COLUMN TOTAL_ERRORS HEADING 'Total|Apply|Errors' FORMAT 9999
COLUMN BEING_APPLIED HEADING 'Total|Trans Being|Applied' FORMAT 99999999
COLUMN TOTAL_IGNORED HEADING 'Total|Trans|Ignored' FORMAT 99999999
SELECT APPLY_NAME,
TOTAL_RECEIVED,
TOTAL_APPLIED,
TOTAL_ERRORS,
(TOTAL_ASSIGNED - (TOTAL_ROLLBACKS + TOTAL_APPLIED)) BEING_APPLIED,
TOTAL_IGNORED
FROM V$STREAMS_APPLY_COORDINATOR;
V$STREAMS_APPLY_COORDINATOR Query for Latency
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A17
COLUMN 'Latency in Seconds' FORMAT 999999
COLUMN 'Message Creation' FORMAT A17
COLUMN 'Apply Time' FORMAT A17
COLUMN HWM_MESSAGE_NUMBER HEADING 'Applied|Message|Number' FORMAT 999999
SELECT APPLY_NAME,
(HWM_TIME-HWM_MESSAGE_CREATE_TIME)*86400 "Latency in Seconds",
TO_CHAR(HWM_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY')
"Message Creation",
TO_CHAR(HWM_TIME,'HH24:MI:SS MM/DD/YY') "Apply Time",
HWM_MESSAGE_NUMBER
FROM V$STREAMS_APPLY_COORDINATOR;
DBA_APPLY_PROGRESS Query for Latency
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A17
COLUMN 'Latency in Seconds' FORMAT 999999
COLUMN 'Message Creation' FORMAT A17
COLUMN 'Apply Time' FORMAT A17
COLUMN APPLIED_MESSAGE_NUMBER HEADING 'Applied|Message|Number' FORMAT 999999
SELECT APPLY_NAME,
(APPLY_TIME-APPLIED_MESSAGE_CREATE_TIME)*86400 "Latency in Seconds",
TO_CHAR(APPLIED_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY')
"Message Creation",
TO_CHAR(APPLY_TIME,'HH24:MI:SS MM/DD/YY') "Apply Time",
APPLIED_MESSAGE_NUMBER
FROM DBA_APPLY_PROGRESS;
Checking for Apply Errors
COLUMN APPLY_NAME HEADING 'Apply|Process|Name' FORMAT A10
COLUMN SOURCE_DATABASE HEADING 'Source|Database' FORMAT A10
COLUMN LOCAL_TRANSACTION_ID HEADING 'Local|Transaction|ID' FORMAT A11
COLUMN ERROR_NUMBER HEADING 'Error Number' FORMAT 99999999
COLUMN ERROR_MESSAGE HEADING 'Error Message' FORMAT A20
COLUMN MESSAGE_COUNT HEADING 'Messages in|Error|Transaction' FORMAT 99999999
SELECT APPLY_NAME,
SOURCE_DATABASE,
LOCAL_TRANSACTION_ID,
ERROR_NUMBER,
ERROR_MESSAGE,
MESSAGE_COUNT
FROM DBA_APPLY_ERROR;
Thursday, July 3, 2008
SQL scripts
To know SID and other information from sqlplus session
======================================================
SELECT userenv('SID') SID,sys_context('USERENV', 'CURRENT_SCHEMA') SCHEMA,sys_context('USERENV', 'OS_USER') OSUSER,sys_context('USERENV', 'HOST') HOSTNAME FROM DUAL;
Tracing
=======
At session level
================
exec dbms_system.set_sql_trace_in_session(1473, 32781, TRUE);
select 'exec dbms_system.set_sql_trace_in_session('||s.sid||','||s.serial#||', TRUE);' from v$session s,v$process p
where s.paddr = p.addr and
s.username='SCOTT';
At DB level
===========
ALTER SYSTEM SET trace_enabled = TRUE;
Execute the statement below so that the trace includes bind variables and wait events
-- Enable event 10046 level 12 (SQL trace with bind variables and waits)
-- for the running instance. Fix: the original used typographic quotes
-- (’…’), which are not valid SQL string delimiters, and the dynamic
-- in-memory form is "SET EVENTS '<event spec>'" (no scope clause);
-- "SET EVENT=... " is the spfile-parameter form and requires scope=spfile.
ALTER SYSTEM SET EVENTS '10046 trace name context forever, level 12';
tkprof dbname_ora_17776.trc testtrace.txt explain=dev/dev@dbname sys=no waits=yes sort=exeela,userid
ls -l dbname_ora*.trc | awk '{print "tkprof "$9" "$9".txt explain=dev/dev@dbname sys=no waits=yes sort=exeela,userid"}'
User session as per Logon time
==============================
set pages 50000;
set linesize 160;
col username for a15
col osuser for a10
col machine for a20
col program for a35
select s.sid,s.serial#,p.spid,s.status,s.username,s.osuser,s.machine,s.program,to_char(s.logon_time,'DD-MON-YYYY HH24:MI:SS') logged_on from v$session s,v$process p where s.paddr = p.addr and s.username='SCOTT'
order by s.osuser;
To kill all Session for user
============================
select 'alter system kill session '''||sid||','||serial#||''';' from v$session where username in ('SCOTT');
Currently active sessions and the SQL they are running
================================================
set pages 10000
col username form a20
col machine form a30
col osuser form a10
select s.username,s.sid,s.serial#,s.osuser,p.spid,s.machine,
sql_text,buffer_gets,executions
from v$sqlarea , v$session s,v$process p
where s.sql_address = address
and s.status = 'ACTIVE'
and s.paddr = p.addr
and s.username is not null
order by buffer_gets desc;
List all open cursors
====================
SELECT SID,USER_NAME,SQL_TEXT FROM V$OPEN_CURSOR ORDER BY 1;
Tablespace Usage and freespace
==============================
SET SERVEROUTPUT ON
SET PAGESIZE 1000
SET LINESIZE 160
SELECT Substr(df.tablespace_name,1,20) "Tablespace Name",
Substr(df.file_name,1,40) "File Name",
Round(df.bytes/1024/1024,2) "Size (M)",
Round(e.used_bytes/1024/1024,2) "Used (M)",
Round(f.free_bytes/1024/1024,2) "Free (M)"
FROM DBA_DATA_FILES DF,
(SELECT file_id,
Sum(Decode(bytes,NULL,0,bytes)) used_bytes
FROM dba_extents
GROUP by file_id) E,
(SELECT Max(bytes) free_bytes,
file_id
FROM dba_free_space
GROUP BY file_id) f
WHERE e.file_id (+) = df.file_id
AND df.file_id = f.file_id (+)
ORDER BY df.tablespace_name,
df.file_name;
Undo Information
================
column "UserName" format a8
column "DB Sid" format 999999
column "Unix Pid" format 99999999
column "Trnx_start_time" format a19
column "Current Time" format a19
column "Elapsed(mins)" format 999999999.99
column "Undo Name" format a09
column "Used Undo Blks" format a13
column "Used Undo Size(Kb)" format a17
column "Logical I/O(Blks)" format 99999999999999999
column "Logical I/O(Kb)" format 999999999999999
column "Physical I/O(Blks)" format 999999999999999999
column "Physical I/O(Kb)" format 999999999999999999
SELECT
a.username "UserName"
, a.sid "DB Sid"
, e.spid "Unix Pid"
, TO_CHAR(TO_DATE(b.start_time,'mm/dd/yy hh24:mi:ss'),'yyyy/mm/dd hh24:mi:ss') "Trnx_start_time"
, TO_CHAR(sysdate,'yyyy/mm/dd hh24:mi:ss') "Current Time"
, ROUND(60*24*(sysdate-to_date(b.start_time,'mm/dd/yy hh24:mi:ss')),2) "Elapsed(mins)"
, c.segment_name "Undo Name"
, TO_CHAR(b.used_ublk*d.value/1024) "Used Undo Size(Kb)"
, TO_CHAR(b.used_ublk) "Used Undo Blks"
, b.log_io "Logical I/O(Blks)"
, b.log_io*d.value/1024 "Logical I/O(Kb)"
, b.phy_io "Physical I/O(Blks)"
, b.phy_io*d.value/1024 "Physical I/O(Kb)"
, a.program
FROM
v$session a
, v$transaction b
, dba_rollback_segs c
, v$parameter d
, v$process e
WHERE
b.ses_addr = a.saddr
AND b.xidusn = c.segment_id
AND d.name = 'db_block_size'
AND e.ADDR = a.PADDR
ORDER BY 4
/
select e.sid,e.serial#, substr(a.os_user_name,1,8) "OS User"
, substr(a.oracle_username,1,8) "DB User"
, substr(b.owner,1,8) "Schema"
, substr(b.object_name,1,20) "Object Name"
, substr(b.object_type,1,10) "Type"
, substr(c.segment_name,1,15) "RBS"
, substr(d.used_urec,1,12) "# of Records"
,e.program
from v$locked_object a
, dba_objects b
, dba_rollback_segs c
, v$transaction d
, v$session e
where a.object_id = b.object_id
and a.xidusn = c.segment_id
and a.xidusn = d.xidusn
and a.xidslot = d.xidslot
and d.addr = e.taddr
/
select e.sid
, oracle_username
, b.owner
, b.object_name
, b.object_type
, segment_name
, d.used_urec
, sysdate
from v$locked_object a
, dba_objects b
, dba_rollback_segs c
, v$transaction d
, v$session e
where a.object_id = b.object_id
and a.xidusn = c.segment_id
and a.xidusn = d.xidusn
and a.xidslot = d.xidslot
and d.addr = e.taddr
/
TEMP Tablespace Usage
=====================
-- Show the SQL text currently being executed by sessions that are using
-- more than ~100 MB of sort/TEMP space (excluding SYSTEM).
-- NOTE(review): so.blocks*8/1024 assumes an 8 KB block size — confirm
-- against db_block_size before trusting the 100 MB threshold.
SELECT b.sid,b.serial#,a.sql_text
FROM v$sqltext a,
v$session b
WHERE a.address = b.sql_address
AND a.hash_value = b.sql_hash_value
AND b.sid in (select se.sid
from v$sort_usage so,v$session se
where so.session_addr = se.saddr
and se.USERNAME not in ('SYSTEM')
and so.blocks*8/1024 >100)
-- Order by piece so multi-row SQL text reassembles in order per session.
ORDER BY b.sid,a.piece;
Active session
==============
select s.sid,s.serial#,p.spid,s.username,s.osuser,s.machine, sql_text,v$sqlarea.hash_value,
logon_time,last_call_et,sysdate
from v$sqlarea , v$session s,v$process p
where s.sql_address = address
and s.status = 'ACTIVE'
and s.paddr = p.addr
and s.username is not null
and last_call_et > 0
order by last_call_et
/
select s.username,s.sid,s.serial#,s.osuser,p.spid,s.machine, sql_text,buffer_gets,executions,buffer_gets/executions "gets_per_exec" ,v$sqlarea.hash_value
from v$sqlarea , v$session s,v$process p
where s.sql_address = address
and s.status = 'ACTIVE'
and s.paddr = p.addr
and s.username is not null
and executions > 0
order by buffer_gets desc
/
Locks and Blocking information
==============================
SET LINESIZE 145
SET PAGESIZE 66
COLUMN owner FORMAT a5 HEADING 'Owner'
COLUMN object_type FORMAT a10 HEADING 'Type'
COLUMN object_name FORMAT a25 HEADING 'Name'
COLUMN locked_mode FORMAT a9 HEADING 'Locked Mode'
COLUMN sid FORMAT 999 HEADING 'SID'
COLUMN username FORMAT a15 HEADING 'Database User'
COLUMN osuser FORMAT a10 HEADING 'O/S User'
COLUMN logon_time HEADING 'Login Time'
SELECT
SUBSTR(b.owner, 1, 8) owner
, b.object_type object_type
, SUBSTR(b.object_name, 1, 18) object_name
, DECODE(a.locked_mode
, 0, 'None'
, 1, 'Null'
, 2, 'Row-S'
, 3, 'Row-X'
, 4, 'Share'
, 5, 'S/Row-X'
, 6, 'Exclusive') locked_mode
, a.session_id sid
,a.oracle_username username
, a.os_user_name osuser
, TO_CHAR(c.logon_time,'YYYY/MM/DD HH24:MI:SS') logon_time
FROM
v$locked_object a
, dba_objects b
, v$session c
WHERE
a.object_id = b.object_id
AND a.session_id = c.sid
ORDER BY
b.owner
, b.object_type
, b.object_name
/
col USERNAME for a20
select
s.sid,s.serial#,s.username, s.status,
s.osuser,o.object_name, q.sql_text
from v$session s, v$sqlarea q,v$locked_object l,dba_objects o
where s.sql_hash_value = q.hash_value
and s.sql_address = q.address
and l.object_id=o.object_id
and l.session_id=s.sid
and s.username = 'SCOTT';
select
s.sid,s.serial#,s.username, s.status,
s.osuser,o.object_name
from v$session s,v$locked_object l,dba_objects o
where
l.object_id=o.object_id
and l.session_id=s.sid
and s.username = 'SCOTT';
Few scripts to identify Blocking locks.
=======================================
$ORACLE_HOME/rdbms/admin/utllockt.sql
The above script provided by oracle will give you a "tree" structure of
which sessions are holding locks that are affecting other users.
select * from v$lock ;
To identify the blocking/blocked sessions.
==========================================
select l1.sid, ' IS BLOCKING ', l2.sid from v$lock l1, v$lock l2
where l1.block =1 and l2.request > 0 and l1.id1=l2.id1 and l1.id2=l2.id2;
or
Same in a readable format.
============================
-- Human-readable "who is blocking whom": joins each blocking lock (BLOCK=1)
-- to the sessions waiting on the same lock resource (request > 0).
select s1.username || '@' || s1.machine || ' ( SID=' || s1.sid || ' ) is blocking ' || s2.username || '@' || s2.machine || ' ( SID=' || s2.sid || ' ) ' AS blocking_status
from v$lock l1, v$session s1, v$lock l2, v$session s2
where s1.sid=l1.sid and s2.sid=l2.sid and l1.BLOCK=1 and l2.request > 0
and l1.id1 = l2.id1
-- Fix: original compared l2.id2 with itself (always true), so a waiter
-- could be paired with a blocker holding a different lock resource.
and l1.id2 = l2.id2 ;
To Identify the locked object and row
======================================
select do.object_name, row_wait_obj#, row_wait_file#, row_wait_block#, row_wait_row#,
dbms_rowid.rowid_create ( 1, ROW_WAIT_OBJ#, ROW_WAIT_FILE#, ROW_WAIT_BLOCK#, ROW_WAIT_ROW# )
from v$session s, dba_objects do where sid=122 and s.ROW_WAIT_OBJ# = do.OBJECT_ID ;
Identify deadlock
=================
-- Flag each session holding a blocking lock (BLOCKER) or waiting on a
-- lock (WAITER); blockers sort first because block is ordered descending.
select sid,DECODE(block,0,'NO','YES') BLOCKER,
DECODE(request,0,'NO','YES') WAITER
from v$lock
Where request > 0 or block > 0
order by block desc;
Session Wait
============
col sid form 9999
col osuser form a14
col username form a15
col wait form 9999999
col event form a30
select w.sid,s.username,s.osuser,w.seconds_in_wait wait,
w.event
from v$session_wait w,v$session s
where lower(w.event) not in (
'null event',
'client message',
'pmon timer',
'rdbms ipc message',
'rdbms ipc reply',
'smon timer',
lower('WMON goes to sleep'),
'virtual circuit status',
'dispatcher timer',
lower('SQL*Net message from client'),
'parallel query dequeue wait',
lower('Parallel Query Idle Wait - Slaves'),
lower('KXFX: Execution Message Dequeue - Slave'),
'slave wait')
and s.sid = w.sid
order by wait;
MTS
===
select paddr,type,queued,wait,totalq, decode(totalq,0,0,(wait/totalq)) "AVG WAIT" from v$queue
/
select name "NAME", substr(network,1,15) "PROTOCOL",status "STATUS", (busy/(busy + idle)) * 100 "% TIME BUSY" from v$dispatcher
/
select name, status, requests, (busy/(busy+idle)) * 100 "% TIME BUSY" from v$shared_server
/
Performance tuning
===================
#REM#Buffer cache hit ratio
/***********************************************
This script calculates the hit ratio of the data buffer cache.
If the hit ratio is less than 80%-90%, increase the buffer cache by using
alter system set db_cache_size=<bigger size> scope=both;
************************************************/
column xn1 format a50
column xn2 format a50
column xn3 format a50
column xv1 new_value xxv1 noprint
column xv2 new_value xxv2 noprint
column xv3 new_value xxv3 noprint
column d1 format a50
column d2 format a50
prompt HIT RATIO:
prompt
prompt Values Hit Ratio is calculated against:
prompt
select lpad(name,20,' ')||' = '||value xn1, value xv1
from v$sysstat
where name = 'db block gets'
/
select lpad(name,20,' ')||' = '||value xn2, value xv2
from v$sysstat
where name = 'consistent gets'
/
select lpad(name,20,' ')||' = '||value xn3, value xv3
from v$sysstat b
where name = 'physical reads'
/
set pages 60
select 'Logical reads = db block gets + consistent gets ',
lpad ('Logical Reads = ',24,' ')||to_char(&xxv1+&xxv2) d1
from dual
/
select 'Hit Ratio = (logical reads - physical reads) / logical reads',
lpad('Hit Ratio = ',24,' ')||
round( (((&xxv2+&xxv1) - &xxv3) / (&xxv2+&xxv1))*100,2 )||'%' d2
from dual
/
column "Consistent Gets" format 9,999,999,999,999
column "DB Block Gets" format 9,999,999,999,999
column "Hit Ratio" format 999.99
SELECT (1 - (phys.value / (db.value + cons.value))) * 100
FROM v$sysstat phys,
v$sysstat db,
v$sysstat cons
WHERE phys.name = 'physical reads'
AND db.name = 'db block gets'
AND cons.name = 'consistent gets';
set serveroutput on;
variable g char(10);
variable d number;
variable c number;
variable r number;
begin
select substr(global_name,1,10) into :g
from global_name;
select value into :c
from v$sysstat
where name = 'consistent gets';
select value into :d
from v$sysstat
where name = 'db block gets';
select to_number(substr((1- value/( :c + :d)),1,5))*100 into :r
from v$sysstat
where name in ( 'physical reads');
dbms_output.put_line('Buffer cache hit ratio for '||:g||' is '||:r||'%');
end;
/
#REM#Dict. Cache Hit Ratio
SELECT (1 - (Sum(getmisses)/(Sum(gets) + Sum(getmisses)))) * 100
FROM v$rowcache;
#REM#Library Cache Hit Ratio
SELECT (1 -(Sum(reloads)/(Sum(pins) + Sum(reloads)))) * 100 from v$librarycache;
#REM#Latch Hit Ratio
SELECT (1 - (Sum(misses) / Sum(gets))) * 100 FROM v$latch;
#REM# Sort Ratio
-- Percentage of sorts that spilled to disk. Fix: the V$SYSSTAT statistic
-- names contain a space — 'sorts (disk)' / 'sorts (memory)'; the original
-- 'sorts(disk)' matched no rows, so the query returned nothing.
select (disk.value/mem.value) * 100 from v$sysstat disk, v$sysstat mem where disk.name='sorts (disk)'
and mem.name='sorts (memory)';
#REM#Rollback segment Waits
select (sum(waits)/sum(gets)) * 100 from v$rollstat;
#REM#Shared Pool Size (Execution Misses)
select sum(pins) "Executions" , sum(reloads) "Cache Misses Executing", (sum(reloads)/sum(pins) * 100) "% Ratio"
from v$librarycache;
#REM#Shared Pool Size (Dictionary Gets)
select sum(gets) "Data Dictonary Gets", sum(getmisses) "Get Misses" , 100*(sum(getmisses)/sum(gets)) "% Ratio"
from v$rowcache;
#REM#If % Ratio is above 1%, increase SHARED_POOL_SIZE
select sum(pins) "Executions",
sum(reloads) "Cache Misses Executing",
(sum(reloads)/sum(pins)*100) "% Ratio"
from v$librarycache
#REM#If % Ratio is above 12%, increase SHARED_POOL_SIZE
select sum(gets) "Data Dictionary Gets",
sum(getmisses) "Get Misses",
100*(sum(getmisses)/sum(gets)) "% Ratio"
from v$rowcache
/
#REM#If the Value is not near 0, increase LOG_BUFFER
select substr(name,1,25) Name,
substr(value,1,15) "VALUE (Near 0?)"
from v$sysstat
where name = 'redo log space requests';
#REM# Physical read and write (IO)
SET PAGESIZE 1000
SELECT Substr(d.name,1,50) "File Name",
f.phyblkrd "Blocks Read",
f.phyblkwrt "Blocks Writen",
f.phyblkrd + f.phyblkwrt "Total I/O"
FROM v$filestat f,
v$datafile d
WHERE d.file# = f.file#
ORDER BY f.phyblkrd + f.phyblkwrt DESC;
#REM# Session IO
SELECT * FROM v$sess_io WHERE SID=117;
#REM#Pls substitute the value for no of top SQL's consuming resources
SET LINESIZE 500
SET PAGESIZE 1000
SET FEEDBACK OFF
SET VERIFY OFF
SET SERVEROUTPUT ON
PROMPT
-- Print the top-N SQL statements ordered by disk reads per execution.
-- N is supplied as the first SQL*Plus script argument (referenced as &&1 below).
DECLARE
CURSOR c_sql IS
SELECT Substr(a.sql_text,1,50) sql_text,
-- DECODE avoids divide-by-zero for statements never fully executed.
Trunc(a.disk_reads/Decode(a.executions,0,1,a.executions)) reads_per_execution,
a.buffer_gets,
a.disk_reads,
a.executions,
a.sorts,
a.address
FROM v$sqlarea a
ORDER BY 2 DESC;
BEGIN
Dbms_Output.Enable(1000000);
-- Fixed-width column headers.
Dbms_Output.Put_Line(Rpad('SQL Text',50,' ') ||
Lpad('Reads/Execution',16,' ') ||
Lpad('Buffer Gets',12,' ') ||
Lpad('Disk Reads',12,' ') ||
Lpad('Executions',12,' ') ||
Lpad('Sorts',12,' ') ||
Lpad('Address',10,' '));
Dbms_Output.Put_Line(Rpad('-',50,'-') || ' ' ||
Lpad('-',15,'-') || ' ' ||
Lpad('-',11,'-') || ' ' ||
Lpad('-',11,'-') || ' ' ||
Lpad('-',11,'-') || ' ' ||
Lpad('-',11,'-') || ' ' ||
Lpad('-',9,'-'));
<< top_sql >>
FOR cur_rec IN c_sql LOOP
Dbms_Output.Put_Line(Rpad(cur_rec.sql_text,50,' ') ||
Lpad(cur_rec.reads_per_execution,16,' ') ||
Lpad(cur_rec.buffer_gets,12,' ') ||
Lpad(cur_rec.disk_reads,12,' ') ||
Lpad(cur_rec.executions,12,' ') ||
Lpad(cur_rec.sorts,12,' ') ||
Lpad(cur_rec.address,10,' '));
-- Stop once the requested number of statements has been printed.
IF c_sql%ROWCOUNT = &&1 THEN
EXIT top_sql;
END IF;
END LOOP;
END;
/
OVERALL CPU USAGE
=================
-- Overall CPU usage split into parse / recursive / other, in centiseconds.
-- FIX: the original used statistic names 'parse CPU time' and 'recursive CPU',
-- which do not exist in v$sysstat, so the join produced no rows; the real
-- names are 'parse time cpu' and 'recursive cpu usage'.
select distinct a.name, a.value "Total CPU",
b.value "Parse CPU",
c.value "Recursive CPU",
(a.value - b.value - c.value) "Other"
from v$sysstat a, v$sysstat b, v$sysstat c
where a.name = 'CPU used by this session'
and b.name = 'parse time cpu'
and c.name = 'recursive cpu usage';
SID which has taken maximum CPU
===============================
col username form a15
set pages 10000
col name form a30
-- Sessions whose CPU-related statistics exceed 12000 centiseconds (2 minutes
-- of CPU), smallest first.  v$sesstat values are in hundredths of a second,
-- hence the /(60*100) conversion to minutes.
select s.username,se.sid,st.name,se.value/(60*100) "In Minutes"
from v$sesstat se,v$statname st,v$session s
where se.STATISTIC#=st.STATISTIC#
and st.name like '%CPU%'
and se.value > 12000
and s.sid=se.sid
order by se.value/(60*100);
CPU USAGE PER SESSION
====================
-- CPU breakdown addressed by statistic# instead of name.
-- NOTE(review): the literals 12, 150 and 8 are release-dependent and very
-- likely wrong on modern versions -- prefer joining v$statname by name
-- ('CPU used by this session', 'parse time cpu', 'recursive cpu usage').
select distinct a.name, a.value "Total CPU",
b.value "Parse CPU",
c.value "Recursive CPU",
(a.value - b.value - c.value) "Other CPU"
from v$sysstat a, v$sysstat b, v$sysstat c
where a.statistic#=12
and b.statistic#=150
and c.statistic#=8;
Memory used by non-application (internal) users
==================================
-- Shared-SQL memory consumed by statements parsed by the internal accounts
-- (DBSNMP / SYS / SYSTEM / SYSMAN), one row per account.
select u.username,
       count(*),
       sum(q.sharable_mem) sharable_mem,
       sum(q.persistent_mem) persistent_mem,
       sum(q.runtime_mem) runtime_mem,
       sum(q.sharable_mem + q.persistent_mem + q.runtime_mem) total_mem
from dba_users u, v$sql q
where q.parsing_user_id = u.user_id
and u.username in ('DBSNMP', 'SYS', 'SYSTEM', 'SYSMAN')
group by u.username;
Latch Contention
================
col name form A25
col gets form 999,999,999
col misses form 999.99
col spins form 999.99
col igets form 999,999,999
col imisses form 999.99
select name,gets,misses*100/decode(gets,0,1,gets) misses,
spin_gets*100/decode(misses,0,1,misses) spins, immediate_gets igets
,immediate_misses*100/decode(immediate_gets,0,1,immediate_gets) imisses
from v$latch order by gets + immediate_gets
/
Analyze
=======
Validate structure
Select 'Analyze '||object_type||' '||object_name||' validate structure;'
from dba_objects
where owner='SYS'
and object_type in ('INDEX','TABLE','CLUSTER');
Gather stats with Histograms
============================
begin
dbms_stats.gather_schema_stats(
ownname => 'HR',
estimate_percent => dbms_stats.auto_sample_size,
method_opt => 'FOR ALL INDEXED COLUMNS SIZE AUTO',
cascade => true,
degree => 2
);
end;
/
Script to do collect statistics for HR schema at 9 PM
=====================================================
-- (Re)create the nightly HR statistics job.
-- FIX: DBMS_SCHEDULER.DROP_JOB raises ORA-27475 when the job does not exist
-- (force => TRUE does not suppress that), so the first run of this script
-- used to fail; the drop is now wrapped and only that error is ignored.
BEGIN
BEGIN
DBMS_SCHEDULER.DROP_JOB (
job_name => 'HR_STATS_REFRESH'
,force => TRUE);
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE != -27475 THEN  -- re-raise anything except "job does not exist"
RAISE;
END IF;
END;
DBMS_SCHEDULER.CREATE_JOB (
job_name => 'HR_STATS_REFRESH'
,job_type => 'PLSQL_BLOCK'
,job_action => 'BEGIN DBMS_STATS.GATHER_SCHEMA_STATS(''HR'');END;'
-- Explicit format mask: the bare string depended on session NLS settings.
,start_date => TO_TIMESTAMP('09/01/2004 09:00:00','MM/DD/YYYY HH24:MI:SS')
,repeat_interval => 'FREQ=DAILY'
,enabled => FALSE  -- job stays dormant until DBMS_SCHEDULER.ENABLE is called
,comments => 'Refreshes the HR Schema every night at 9 PM'
);
END;
/
Archiver
========
#REM# show db in archive log mode or not
archive log list
#REM Archiver gap
SELECT MAX(R.SEQUENCE#) LAST_SEQ_RECD, MAX(L.SEQUENCE#) LAST_SEQ_SENT FROM V$ARCHIVED_LOG R, V$LOG L WHERE R.DEST_ID=2 AND L.ARCHIVED='YES'
/
Hanganalyze
===========
SQL> ALTER SESSION SET MAX_DUMP_FILE_SIZE=UNLIMITED;
Session altered.
SQL> oradebug setmypid
Statement processed.
SQL> oradebug hanganalyze 3
Useful SQLs
=============
#REM# delete duplicate rows from table
delete from dummy where rowid not in (select min(rowid) from dummy group by n1);
#REM Total db size
select sum(bytes)/1024/1024 allocated_space from dba_data_files;
select name from v$database;
select sum(bytes)/1024/1024 from dba_temp_files;
Find the primary key column
===========================
set linesize 160;
set pages 50000;
col table_name for a45;
col column_name for a60;
col constraint_name for a45;
select t.TABLE_NAME,t.COLUMN_NAME,t.CONSTRAINT_NAME,c.CONSTRAINT_TYPE from USER_CONSTRAINTS c,user_cons_columns t where t.CONSTRAINT_NAME=c.CONSTRAINT_NAME and t.TABLE_NAME=c.TABLE_NAME and constraint_type='P' order by t.COLUMN_NAME;
SELECT CON.TABLE_NAME,COL.COLUMN_NAME,CON.CONSTRAINT_TYPE FROM DBA_CONSTRAINTS CON,DBA_CONS_COLUMNS COL
WHERE CON.TABLE_NAME=COL.TABLE_NAME AND CON.CONSTRAINT_NAME=COL.CONSTRAINT_NAME AND CON.OWNER='SCOTT' AND COL.OWNER='SCOTT' AND CON.CONSTRAINT_TYPE IN ('P','U') ORDER BY CON.TABLE_NAME;
======================================================
SELECT userenv('SID') SID,sys_context('USERENV', 'CURRENT_SCHEMA') SCHEMA,sys_context('USERENV', 'OS_USER') OSUSER,sys_context('USERENV', 'HOST') HOSTNAME FROM DUAL;
Tracing
=======
At session level
================
exec dbms_system.set_sql_trace_in_session(1473, 32781, TRUE);
select 'exec dbms_system.set_sql_trace_in_session('||s.sid||','||s.serial#||', TRUE);' from v$session s,v$process p
where s.paddr = p.addr and
s.username='SCOTT';
At DB level
===========
ALTER SYSTEM SET trace_enabled = TRUE;
Execute the following so that the trace file includes bind variables and wait events (level 12)
-- FIX: the original used typographic quotes, which SQL*Plus rejects, and
-- SCOPE=MEMORY, which is invalid for the static EVENT parameter (spfile only;
-- takes effect after restart).  For the current session use instead:
--   ALTER SESSION SET EVENTS '10046 trace name context forever, level 12';
ALTER SYSTEM SET EVENT='10046 trace name context forever, level 12' SCOPE=SPFILE;
tkprof dbname_ora_17776.trc testtrace.txt explain=dev/dev@dbname sys=no waits=yes sort=exeela,userid
ls -l dbname_ora*.trc | awk '{print "tkprof "$9" "$9".txt explain=dev/dev@dbname sys=no waits=yes sort=exeela,userid"}'
User session as per Logon time
==============================
set pages 50000;
set linesize 160;
col username for a15
col osuser for a10
col machine for a20
col program for a35
select s.sid,s.serial#,p.spid,s.status,s.username,s.osuser,s.machine,s.program,to_char(s.logon_time,'DD-MON-YYYY HH24:MI:SS') logged_on from v$session s,v$process p where s.paddr = p.addr and s.username='SCOTT'
order by s.osuser;
To kill all Session for user
============================
-- Generate an ALTER SYSTEM KILL SESSION command for every session owned by SCOTT.
select 'alter system kill session ''' || sid || ',' || serial# || ''';'
from v$session
where username = 'SCOTT';
Current active session's and sql they are running
================================================
set pages 10000
col username form a20
col machine form a30
col osuser form a10
select s.username,s.sid,s.serial#,s.osuser,p.spid,s.machine,
sql_text,buffer_gets,executions
from v$sqlarea , v$session s,v$process p
where s.sql_address = address
and s.status = 'ACTIVE'
and s.paddr = p.addr
and s.username is not null
order by buffer_gets desc;
List all open cursor
====================
-- List every open cursor with its owning session, ordered by session id.
select sid,
       user_name,
       sql_text
from v$open_cursor
order by sid;
Tablespace Usage and freespace
==============================
SET SERVEROUTPUT ON
SET PAGESIZE 1000
SET LINESIZE 160
-- Size / used / free per datafile (MB), outer-joined so files with no
-- extents or no free space still appear.
-- NOTE(review): "Free (M)" uses Max(bytes), i.e. the largest contiguous free
-- extent per file, not the total free space -- confirm this is intentional.
SELECT Substr(df.tablespace_name,1,20) "Tablespace Name",
Substr(df.file_name,1,40) "File Name",
Round(df.bytes/1024/1024,2) "Size (M)",
Round(e.used_bytes/1024/1024,2) "Used (M)",
Round(f.free_bytes/1024/1024,2) "Free (M)"
FROM DBA_DATA_FILES DF,
(SELECT file_id,
Sum(Decode(bytes,NULL,0,bytes)) used_bytes
FROM dba_extents
GROUP by file_id) E,
(SELECT Max(bytes) free_bytes,
file_id
FROM dba_free_space
GROUP BY file_id) f
WHERE e.file_id (+) = df.file_id
AND df.file_id = f.file_id (+)
ORDER BY df.tablespace_name,
df.file_name;
Undo Information
================
column "UserName" format a8
column "DB Sid" format 999999
column "Unix Pid" format 99999999
column "Trnx_start_time" format a19
column "Current Time" format a19
column "Elapsed(mins)" format 999999999.99
column "Undo Name" format a09
column "Used Undo Blks" format a13
column "Used Undo Size(Kb)" format a17
column "Logical I/O(Blks)" format 99999999999999999
column "Logical I/O(Kb)" format 999999999999999
column "Physical I/O(Blks)" format 999999999999999999
column "Physical I/O(Kb)" format 999999999999999999
SELECT
a.username "UserName"
, a.sid "DB Sid"
, e.spid "Unix Pid"
, TO_CHAR(TO_DATE(b.start_time,'mm/dd/yy hh24:mi:ss'),'yyyy/mm/dd hh24:mi:ss') "Trnx_start_time"
, TO_CHAR(sysdate,'yyyy/mm/dd hh24:mi:ss') "Current Time"
, ROUND(60*24*(sysdate-to_date(b.start_time,'mm/dd/yy hh24:mi:ss')),2) "Elapsed(mins)"
, c.segment_name "Undo Name"
, TO_CHAR(b.used_ublk*d.value/1024) "Used Undo Size(Kb)"
, TO_CHAR(b.used_ublk) "Used Undo Blks"
, b.log_io "Logical I/O(Blks)"
, b.log_io*d.value/1024 "Logical I/O(Kb)"
, b.phy_io "Physical I/O(Blks)"
, b.phy_io*d.value/1024 "Physical I/O(Kb)"
, a.program
FROM
v$session a
, v$transaction b
, dba_rollback_segs c
, v$parameter d
, v$process e
WHERE
b.ses_addr = a.saddr
AND b.xidusn = c.segment_id
AND d.name = 'db_block_size'
AND e.ADDR = a.PADDR
ORDER BY 4
/
select e.sid,e.serial#, substr(a.os_user_name,1,8) "OS User"
, substr(a.oracle_username,1,8) "DB User"
, substr(b.owner,1,8) "Schema"
, substr(b.object_name,1,20) "Object Name"
, substr(b.object_type,1,10) "Type"
, substr(c.segment_name,1,15) "RBS"
, substr(d.used_urec,1,12) "# of Records"
,e.program
from v$locked_object a
, dba_objects b
, dba_rollback_segs c
, v$transaction d
, v$session e
where a.object_id = b.object_id
and a.xidusn = c.segment_id
and a.xidusn = d.xidusn
and a.xidslot = d.xidslot
and d.addr = e.taddr
/
select e.sid
, oracle_username
, b.owner
, b.object_name
, b.object_type
, segment_name
, d.used_urec
, sysdate
from v$locked_object a
, dba_objects b
, dba_rollback_segs c
, v$transaction d
, v$session e
where a.object_id = b.object_id
and a.xidusn = c.segment_id
and a.xidusn = d.xidusn
and a.xidslot = d.xidslot
and d.addr = e.taddr
/
TEMP Tablespace Usage
=====================
SELECT b.sid,b.serial#,a.sql_text
FROM v$sqltext a,
v$session b
WHERE a.address = b.sql_address
AND a.hash_value = b.sql_hash_value
AND b.sid in (select se.sid
from v$sort_usage so,v$session se
where so.session_addr = se.saddr
and se.USERNAME not in ('SYSTEM')
and so.blocks*8/1024 >100)
ORDER BY b.sid,a.piece;
Active session
==============
select s.sid,s.serial#,p.spid,s.username,s.osuser,s.machine, sql_text,v$sqlarea.hash_value,
logon_time,last_call_et,sysdate
from v$sqlarea , v$session s,v$process p
where s.sql_address = address
and s.status = 'ACTIVE'
and s.paddr = p.addr
and s.username is not null
and last_call_et > 0
order by last_call_et
/
select s.username,s.sid,s.serial#,s.osuser,p.spid,s.machine, sql_text,buffer_gets,executions,buffer_gets/executions "gets_per_exec" ,v$sqlarea.hash_value
from v$sqlarea , v$session s,v$process p
where s.sql_address = address
and s.status = 'ACTIVE'
and s.paddr = p.addr
and s.username is not null
and executions > 0
order by buffer_gets desc
/
Locks and Blocking information
==============================
SET LINESIZE 145
SET PAGESIZE 66
COLUMN owner FORMAT a5 HEADING 'Owner'
COLUMN object_type FORMAT a10 HEADING 'Type'
COLUMN object_name FORMAT a25 HEADING 'Name'
COLUMN locked_mode FORMAT a9 HEADING 'Locked Mode'
COLUMN sid FORMAT 999 HEADING 'SID'
COLUMN username FORMAT a15 HEADING 'Database User'
COLUMN osuser FORMAT a10 HEADING 'O/S User'
COLUMN logon_time HEADING 'Login Time'
SELECT
SUBSTR(b.owner, 1, 8) owner
, b.object_type object_type
, SUBSTR(b.object_name, 1, 18) object_name
, DECODE(a.locked_mode
, 0, 'None'
, 1, 'Null'
, 2, 'Row-S'
, 3, 'Row-X'
, 4, 'Share'
, 5, 'S/Row-X'
, 6, 'Exclusive') locked_mode
, a.session_id sid
,a.oracle_username username
, a.os_user_name osuser
, TO_CHAR(c.logon_time,'YYYY/MM/DD HH24:MI:SS') logon_time
FROM
v$locked_object a
, dba_objects b
, v$session c
WHERE
a.object_id = b.object_id
AND a.session_id = c.sid
ORDER BY
b.owner
, b.object_type
, b.object_name
/
col USERNAME for a20
select
s.sid,s.serial#,s.username, s.status,
s.osuser,o.object_name, q.sql_text
from v$session s, v$sqlarea q,v$locked_object l,dba_objects o
where s.sql_hash_value = q.hash_value
and s.sql_address = q.address
and l.object_id=o.object_id
and l.session_id=s.sid
and s.username = 'SCOTT';
select
s.sid,s.serial#,s.username, s.status,
s.osuser,o.object_name
from v$session s,v$locked_object l,dba_objects o
where
l.object_id=o.object_id
and l.session_id=s.sid
and s.username = 'SCOTT';
Few scripts to identify Blocking locks.
=======================================
$ORACLE_HOME/rdbms/admin/utllockt.sql
The above script provided by oracle will give you a "tree" structure of
which sessions are holding locks that are affecting other users.
select * from v$lock ;
To identify the blocking/blocked sessions.
==========================================
-- Pairs of sessions contending on the same lock resource (id1/id2):
-- the holder (block = 1) on the left, the waiter (request > 0) on the right.
select l1.sid, ' IS BLOCKING ', l2.sid from v$lock l1, v$lock l2
where l1.block =1 and l2.request > 0 and l1.id1=l2.id1 and l1.id2=l2.id2;
or
Same in a readable format.
============================
-- Human-readable blocker -> waiter report.
-- FIX: the last predicate was the tautology "l2.id2 = l2.id2", which paired
-- every blocker with every waiter sharing only id1; it must compare l1 to l2.
select s1.username || '@' || s1.machine || ' ( SID=' || s1.sid || ' ) is blocking ' || s2.username || '@' || s2.machine || ' ( SID=' || s2.sid || ' ) ' AS blocking_status
from v$lock l1, v$session s1, v$lock l2, v$session s2
where s1.sid=l1.sid and s2.sid=l2.sid and l1.BLOCK=1 and l2.request > 0
and l1.id1 = l2.id1
and l1.id2 = l2.id2 ;
To Identify the locked object and row
======================================
-- Object and exact row that a session (SID 122 here -- substitute your own)
-- is waiting on; reconstructs the ROWID from the v$session row_wait_* columns.
select do.object_name, row_wait_obj#, row_wait_file#, row_wait_block#, row_wait_row#,
dbms_rowid.rowid_create ( 1, ROW_WAIT_OBJ#, ROW_WAIT_FILE#, ROW_WAIT_BLOCK#, ROW_WAIT_ROW# )
from v$session s, dba_objects do where sid=122 and s.ROW_WAIT_OBJ# = do.OBJECT_ID ;
Identify deadlock
=================
-- Sessions holding a blocking lock (BLOCKER = YES) or waiting on one
-- (WAITER = YES); a SID showing up in both roles suggests a lock chain
-- that may escalate into a deadlock.
select sid,DECODE(block,0,'NO','YES') BLOCKER,
DECODE(request,0,'NO','YES') WAITER
from v$lock
Where request > 0 or block > 0
order by block desc;
Session Wait
============
col sid form 9999
col osuser form a14
col username form a15
col wait form 9999999
col event form a30
select w.sid,s.username,s.osuser,w.seconds_in_wait wait,
w.event
from v$session_wait w,v$session s
where lower(w.event) not in (
'null event',
'client message',
'pmon timer',
'rdbms ipc message',
'rdbms ipc reply',
'smon timer',
lower('WMON goes to sleep'),
'virtual circuit status',
'dispatcher timer',
lower('SQL*Net message from client'),
'parallel query dequeue wait',
lower('Parallel Query Idle Wait - Slaves'),
lower('KXFX: Execution Message Dequeue - Slave'),
'slave wait')
and s.sid = w.sid
order by wait;
MTS
===
select paddr,type,queued,wait,totalq, decode(totalq,0,0,(wait/totalq)) "AVG WAIT" from v$queue
/
select name "NAME", substr(network,1,15) "PROTOCOL",status "STATUS", (busy/(busy + idle)) * 100 "% TIME BUSY" from v$dispatcher
/
select name, status, requests, (busy/(busy+idle)) * 100 "% TIME BUSY" from v$shared_server
/
Performance tuning
===================
#REM#Buffer cache hit ratio
/***********************************************
This script calculates the hit ratio of the data buffer cache.
If the hit ratio is less than 80%-90%, increase db_cache_size by using
alter system set db_cache_size="bigger size" scope=both;
************************************************/
column xn1 format a50
column xn2 format a50
column xn3 format a50
column xv1 new_value xxv1 noprint
column xv2 new_value xxv2 noprint
column xv3 new_value xxv3 noprint
column d1 format a50
column d2 format a50
prompt HIT RATIO:
prompt
prompt Values Hit Ratio is calculated against:
prompt
select lpad(name,20,' ')||' = '||value xn1, value xv1
from v$sysstat
where name = 'db block gets'
/
select lpad(name,20,' ')||' = '||value xn2, value xv2
from v$sysstat
where name = 'consistent gets'
/
select lpad(name,20,' ')||' = '||value xn3, value xv3
from v$sysstat b
where name = 'physical reads'
/
set pages 60
select 'Logical reads = db block gets + consistent gets ',
lpad ('Logical Reads = ',24,' ')||to_char(&xxv1+&xxv2) d1
from dual
/
select 'Hit Ratio = (logical reads - physical reads) / logical reads',
lpad('Hit Ratio = ',24,' ')||
round( (((&xxv2+&xxv1) - &xxv3) / (&xxv2+&xxv1))*100,2 )||'%' d2
from dual
/
column "Consistent Gets" format 9,999,999,999,999
column "DB Block Gets" format 9,999,999,999,999
column "Hit Ratio" format 999.99
SELECT (1 - (phys.value / (db.value + cons.value))) * 100
FROM v$sysstat phys,
v$sysstat db,
v$sysstat cons
WHERE phys.name = 'physical reads'
AND db.name = 'db block gets'
AND cons.name = 'consistent gets';
set serveroutput on;
variable g char(10);
variable d number;
variable c number;
variable r number;
begin
select substr(global_name,1,10) into :g
from global_name;
select value into :c
from v$sysstat
where name = 'consistent gets';
select value into :d
from v$sysstat
where name = 'db block gets';
select to_number(substr((1- value/( :c + :d)),1,5))*100 into :r
from v$sysstat
where name in ( 'physical reads');
dbms_output.put_line('Buffer cache hit ratio for '||:g||' is '||:r||'%');
end;
/
#REM#Dict. Cache Hit Ratio
SELECT (1 - (Sum(getmisses)/(Sum(gets) + Sum(getmisses)))) * 100
FROM v$rowcache;
#REM#Library Cache Hit Ratio
SELECT (1 -(Sum(reloads)/(Sum(pins) + Sum(reloads)))) * 100 from v$librarycache;
#REM#Latch Hit Ratio
SELECT (1 - (Sum(misses) / Sum(gets))) * 100 FROM v$latch;
#REM# Sort Ratio
-- Percentage of sorts spilling to disk (lower is better).
-- FIX: the v$sysstat statistic names contain a space -- 'sorts (disk)' and
-- 'sorts (memory)' -- the original 'sorts(disk)' matched no rows at all.
-- DECODE guards against division by zero on a freshly started instance.
select (disk.value / decode(mem.value, 0, 1, mem.value)) * 100 "Disk Sort Ratio (%)"
from v$sysstat disk, v$sysstat mem
where disk.name = 'sorts (disk)'
and mem.name = 'sorts (memory)';
#REM#Rollback segment Waits
select (sum(waits)/sum(gets)) * 100 from v$rollstat;
#REM#Shared Pool Size (Execution Misses)
select sum(pins) "Executions" , sum(reloads) "Cache Misses Executing", (sum(reloads)/sum(pins) * 100) "% Ratio"
from v$librarycache;
#REM#Shared Pool Size (Dictionary Gets)
select sum(gets) "Data Dictonary Gets", sum(getmisses) "Get Misses" , 100*(sum(getmisses)/sum(gets)) "% Ratio"
from v$rowcache;
#REM#If % Ratio is above 1%, increase SHARED_POOL_SIZE
-- Library-cache miss ratio.
-- FIX: the original left this statement unterminated, so SQL*Plus folded it
-- into the next SELECT; the added '/' executes it on its own.
select sum(pins) "Executions",
sum(reloads) "Cache Misses Executing",
(sum(reloads)/sum(pins)*100) "% Ratio"
from v$librarycache
/
#REM#If % Ratio is above 12%, increase SHARED_POOL_SIZE
-- Dictionary-cache miss ratio.
select sum(gets) "Data Dictionary Gets",
sum(getmisses) "Get Misses",
100*(sum(getmisses)/sum(gets)) "% Ratio"
from v$rowcache
/
#REM#If the Value is not near 0, increase LOG_BUFFER
select substr(name,1,25) Name,
substr(value,1,15) "VALUE (Near 0?)"
from v$sysstat
where name = 'redo log space requests';
#REM# Physical read and write (IO)
SET PAGESIZE 1000
SELECT Substr(d.name,1,50) "File Name",
f.phyblkrd "Blocks Read",
f.phyblkwrt "Blocks Writen",
f.phyblkrd + f.phyblkwrt "Total I/O"
FROM v$filestat f,
v$datafile d
WHERE d.file# = f.file#
ORDER BY f.phyblkrd + f.phyblkwrt DESC;
#REM# Session IO
SELECT * FROM v$sess_io WHERE SID=117;
#REM#Pls substitute the value for no of top SQL's consuming resources
SET LINESIZE 500
SET PAGESIZE 1000
SET FEEDBACK OFF
SET VERIFY OFF
SET SERVEROUTPUT ON
PROMPT
DECLARE
CURSOR c_sql IS
SELECT Substr(a.sql_text,1,50) sql_text,
Trunc(a.disk_reads/Decode(a.executions,0,1,a.executions)) reads_per_execution,
a.buffer_gets,
a.disk_reads,
a.executions,
a.sorts,
a.address
FROM v$sqlarea a
ORDER BY 2 DESC;
BEGIN
Dbms_Output.Enable(1000000);
Dbms_Output.Put_Line(Rpad('SQL Text',50,' ') ||
Lpad('Reads/Execution',16,' ') ||
Lpad('Buffer Gets',12,' ') ||
Lpad('Disk Reads',12,' ') ||
Lpad('Executions',12,' ') ||
Lpad('Sorts',12,' ') ||
Lpad('Address',10,' '));
Dbms_Output.Put_Line(Rpad('-',50,'-') || ' ' ||
Lpad('-',15,'-') || ' ' ||
Lpad('-',11,'-') || ' ' ||
Lpad('-',11,'-') || ' ' ||
Lpad('-',11,'-') || ' ' ||
Lpad('-',11,'-') || ' ' ||
Lpad('-',9,'-'));
<< top_sql >>
FOR cur_rec IN c_sql LOOP
Dbms_Output.Put_Line(Rpad(cur_rec.sql_text,50,' ') ||
Lpad(cur_rec.reads_per_execution,16,' ') ||
Lpad(cur_rec.buffer_gets,12,' ') ||
Lpad(cur_rec.disk_reads,12,' ') ||
Lpad(cur_rec.executions,12,' ') ||
Lpad(cur_rec.sorts,12,' ') ||
Lpad(cur_rec.address,10,' '));
IF c_sql%ROWCOUNT = &&1 THEN
EXIT top_sql;
END IF;
END LOOP;
END;
/
OVERALL CPU USAGE
=================
-- Overall CPU usage split into parse / recursive / other, in centiseconds.
-- FIX: the original used statistic names 'parse CPU time' and 'recursive CPU',
-- which do not exist in v$sysstat, so the join produced no rows; the real
-- names are 'parse time cpu' and 'recursive cpu usage'.
select distinct a.name, a.value "Total CPU",
b.value "Parse CPU",
c.value "Recursive CPU",
(a.value - b.value - c.value) "Other"
from v$sysstat a, v$sysstat b, v$sysstat c
where a.name = 'CPU used by this session'
and b.name = 'parse time cpu'
and c.name = 'recursive cpu usage';
SID which has taken maximum CPU
===============================
col username form a15
set pages 10000
col name form a30
select s.username,se.sid,st.name,se.value/(60*100) "In Minutes"
from v$sesstat se,v$statname st,v$session s
where se.STATISTIC#=st.STATISTIC#
and st.name like '%CPU%'
and se.value > 12000
and s.sid=se.sid
order by se.value/(60*100);
CPU USAGE PER SESSION
====================
select distinct a.name, a.value "Total CPU",
b.value "Parse CPU",
c.value "Recursive CPU",
(a.value - b.value - c.value) "Other CPU"
from v$sysstat a, v$sysstat b, v$sysstat c
where a.statistic#=12
and b.statistic#=150
and c.statistic#=8;
Memory used by non-application (internal) users
==================================
SELECT A.username, COUNT(*), SUM (B.sharable_mem) sharable_mem,
SUM (B.persistent_mem) persistent_mem,
SUM (B.runtime_mem) runtime_mem,
SUM (B.sharable_mem + B.persistent_mem + B.runtime_mem) total_mem
FROM dba_users A, v$sql B
WHERE A.username IN ('DBSNMP', 'SYS', 'SYSTEM', 'SYSMAN')
AND B.parsing_user_id = A.user_id
GROUP BY A.username;
Latch Contention
================
col name form A25
col gets form 999,999,999
col misses form 999.99
col spins form 999.99
col igets form 999,999,999
col imisses form 999.99
select name,gets,misses*100/decode(gets,0,1,gets) misses,
spin_gets*100/decode(misses,0,1,misses) spins, immediate_gets igets
,immediate_misses*100/decode(immediate_gets,0,1,immediate_gets) imisses
from v$latch order by gets + immediate_gets
/
Analyze
=======
Validate structure
Select 'Analyze '||object_type||' '||object_name||' validate structure;'
from dba_objects
where owner='SYS'
and object_type in ('INDEX','TABLE','CLUSTER');
Gather stats with Histograms
============================
begin
dbms_stats.gather_schema_stats(
ownname => 'HR',
estimate_percent => dbms_stats.auto_sample_size,
method_opt => 'FOR ALL INDEXED COLUMNS SIZE AUTO',
cascade => true,
degree => 2
);
end;
/
Script to do collect statistics for HR schema at 9 PM
=====================================================
-- (Re)create the nightly HR statistics job.
-- FIX: DBMS_SCHEDULER.DROP_JOB raises ORA-27475 when the job does not exist
-- (force => TRUE does not suppress that), so the first run of this script
-- used to fail; the drop is now wrapped and only that error is ignored.
BEGIN
BEGIN
DBMS_SCHEDULER.DROP_JOB (
job_name => 'HR_STATS_REFRESH'
,force => TRUE);
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE != -27475 THEN  -- re-raise anything except "job does not exist"
RAISE;
END IF;
END;
DBMS_SCHEDULER.CREATE_JOB (
job_name => 'HR_STATS_REFRESH'
,job_type => 'PLSQL_BLOCK'
,job_action => 'BEGIN DBMS_STATS.GATHER_SCHEMA_STATS(''HR'');END;'
-- Explicit format mask: the bare string depended on session NLS settings.
,start_date => TO_TIMESTAMP('09/01/2004 09:00:00','MM/DD/YYYY HH24:MI:SS')
,repeat_interval => 'FREQ=DAILY'
,enabled => FALSE  -- job stays dormant until DBMS_SCHEDULER.ENABLE is called
,comments => 'Refreshes the HR Schema every night at 9 PM'
);
END;
/
Archiver
========
#REM# show db in archive log mode or not
archive log list
#REM Archiver gap
SELECT MAX(R.SEQUENCE#) LAST_SEQ_RECD, MAX(L.SEQUENCE#) LAST_SEQ_SENT FROM V$ARCHIVED_LOG R, V$LOG L WHERE R.DEST_ID=2 AND L.ARCHIVED='YES'
/
Hanganalyze
===========
SQL> ALTER SESSION SET MAX_DUMP_FILE_SIZE=UNLIMITED;
Session altered.
SQL> oradebug setmypid
Statement processed.
SQL> oradebug hanganalyze 3
Useful SQLs
=============
#REM# delete duplicate rows from table
delete from dummy where rowid not in (select min(rowid) from dummy group by n1);
#REM Total db size
select sum(bytes)/1024/1024 allocated_space from dba_data_files;
select name from v$database;
select sum(bytes)/1024/1024 from dba_temp_files;
Find the primary key column
===========================
set linesize 160;
set pages 50000;
col table_name for a45;
col column_name for a60;
col constraint_name for a45;
select t.TABLE_NAME,t.COLUMN_NAME,t.CONSTRAINT_NAME,c.CONSTRAINT_TYPE from USER_CONSTRAINTS c,user_cons_columns t where t.CONSTRAINT_NAME=c.CONSTRAINT_NAME and t.TABLE_NAME=c.TABLE_NAME and constraint_type='P' order by t.COLUMN_NAME;
SELECT CON.TABLE_NAME,COL.COLUMN_NAME,CON.CONSTRAINT_TYPE FROM DBA_CONSTRAINTS CON,DBA_CONS_COLUMNS COL
WHERE CON.TABLE_NAME=COL.TABLE_NAME AND CON.CONSTRAINT_NAME=COL.CONSTRAINT_NAME AND CON.OWNER='SCOTT' AND COL.OWNER='SCOTT' AND CON.CONSTRAINT_TYPE IN ('P','U') ORDER BY CON.TABLE_NAME;
Subscribe to:
Posts (Atom)