Saturday, 9 September 2017

SQL Information from Memory ...

SQL information from memory (gv$sql):


set pages 100 lines 400
col sql_profile          for a32
col first_load_time   for a20
col last_load_time    for a20
col outline_category for a20
select sql_id, child_number, plan_hash_value, first_load_time, last_load_time, outline_category, sql_profile, executions,
trunc(decode(executions, 0, 0, rows_processed/executions))              rows_avg,
trunc(decode(executions, 0, 0, fetches/executions))                             fetches_avg,
trunc(decode(executions, 0, 0, disk_reads/executions))                       disk_reads_avg,
trunc(decode(executions, 0, 0, buffer_gets/executions))                      buffer_gets_avg,
trunc(decode(executions, 0, 0, cpu_time/executions))                         cpu_time_avg,
trunc(decode(executions, 0, 0, elapsed_time/executions))                   elapsed_time_avg,
trunc(decode(executions, 0, 0, application_wait_time/executions))     apwait_time_avg,
trunc(decode(executions, 0, 0, concurrency_wait_time/executions))   cwait_time_avg,
trunc(decode(executions, 0, 0, cluster_wait_time/executions))            clwait_time_avg,
trunc(decode(executions, 0, 0, user_io_wait_time/executions))           iowait_time_avg,
trunc(decode(executions, 0, 0, plsql_exec_time/executions))               plsexec_time_avg,
trunc(decode(executions, 0, 0, java_exec_time/executions))                javexec_time_avg
from gv$sql
where 1=1
and sql_id = '&sql_id'
and USERS_EXECUTING!=0 ---- Currently Executing
order by sql_id, child_number;
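Note: cpu_time, elapsed_time and the various *_wait_time / *_exec_time columns in gv$sql are reported in microseconds, so cpu_time_avg, elapsed_time_avg and the wait-time averages above are microseconds per execution.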

SQL information from AWR (dba_hist_sqlstat):
set pages 100 lines 400
col sql_profile          for a32
col first_load_time   for a20
col last_load_time    for a20
col outline_category for a20
select sql_id, snap_id, plan_hash_value, sql_profile, executions_total,
trunc(decode(executions_total, 0, 0, rows_processed_total/executions_total))    rows_avg,
trunc(decode(executions_total, 0, 0, fetches_total/executions_total))                  fetches_avg,
trunc(decode(executions_total, 0, 0, disk_reads_total/executions_total))            disk_reads_avg,
trunc(decode(executions_total, 0, 0, buffer_gets_total/executions_total))           buffer_gets_avg,
trunc(decode(executions_total, 0, 0, cpu_time_total/executions_total))              cpu_time_avg,
trunc(decode(executions_total, 0, 0, elapsed_time_total/executions_total))        elapsed_time_avg,
trunc(decode(executions_total, 0, 0, iowait_total/executions_total))                   iowait_time_avg,
trunc(decode(executions_total, 0, 0, clwait_total/executions_total))                   clwait_time_avg,
trunc(decode(executions_total, 0, 0, apwait_total/executions_total))                  apwait_time_avg,
trunc(decode(executions_total, 0, 0, ccwait_total/executions_total))                   ccwait_time_avg,
trunc(decode(executions_total, 0, 0, plsexec_time_total/executions_total))         plsexec_time_avg,
trunc(decode(executions_total, 0, 0, javexec_time_total/executions_total))         javexec_time_avg
from dba_hist_sqlstat
where 1=1
and sql_id = '&sql_id'
order by sql_id, snap_id;
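dba_hist_sqlstat also exposes matching *_delta columns (executions_delta, elapsed_time_delta, and so on); use those instead of the *_total columns if you want per-snapshot figures rather than cumulative totals since the cursor was loaded.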

Oracle online redo logs and standby logs Info ..

Oracle log file info (logfiles.sql) and standby logs.

Syntax to add/drop a log file group is as below:

SQL> Alter database add standby logfile thread <thread no> group <group no> '<path>' size <size>M;

SQL> Alter database add logfile thread <thread no> group <group no> '<path>' size <size>M;
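
The matching drop statements (the group must be INACTIVE/UNASSIGNED, i.e. no longer needed for crash recovery, before it can be dropped):

SQL> Alter database drop standby logfile group <group no>;

SQL> Alter database drop logfile group <group no>;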


Handling ORL and SRL (Resize) on Primary and Physical Standby in Data Guard Environment (Doc ID 1532566.1)

set linesize 200 pagesize 200
column member format a50
column first_change# format 99999999999999999999
column next_change# format 99999999999999999999
select l.thread#,
       lf.group#,
       lf.member,
       trunc(l.bytes/1024/1024) as size_mb,
       l.status,
       l.archived,
       lf.type,
       lf.is_recovery_dest_file as rdf,
       l.sequence#,
       l.first_change#,
       l.next_change#   
from   v$logfile lf , v$log l
where  l.group# = lf.group#
order by l.thread#,lf.group#, lf.member;
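The STATUS column from v$log is worth checking before any maintenance: CURRENT and ACTIVE groups are still needed for crash recovery, while INACTIVE groups can be cleared or dropped (see the ORA-01624 example further down).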



The same query with the FIRST_TIME / NEXT_TIME columns:

alter session set nls_date_format='dd-mm-yyyy hh24:mi:ss';
set linesize 300 pagesize 200
column member         format a50
column first_change#  format 99999999999999999999
column next_change#   format 99999999999999999999
select l.thread#,
       lf.group#,
       lf.member,
       trunc(l.bytes/1024/1024) as size_mb,
       l.status,
       l.archived,
       lf.type,
       lf.is_recovery_dest_file as rdf,
       l.sequence#,
       l.first_change#,
       l.next_change#,
       l.first_time,
       l.next_time
from   v$logfile lf , v$log l
where  l.group# = lf.group#
order by l.thread#,lf.group#, lf.member;




alter session set nls_date_format='dd-mm-yyyy hh24:mi:ss';
set linesize 300 pagesize 200
column member         format a90
column first_change#  format 99999999999999999999
column next_change#   format 99999999999999999999
col group_size for a10
select l.thread#,
       lf.group#,
       lf.member,
       CASE WHEN l.bytes < 1024          THEN l.bytes||''
            WHEN l.bytes < POWER(1024,2) THEN ROUND(l.bytes/POWER(1024,1),1)||'K'
            WHEN l.bytes < POWER(1024,3) THEN ROUND(l.bytes/POWER(1024,2),1)||'M'
            WHEN l.bytes < POWER(1024,4) THEN ROUND(l.bytes/POWER(1024,3),1)||'G'
            WHEN l.bytes < POWER(1024,5) THEN ROUND(l.bytes/POWER(1024,4),1)||'T'
       END group_size,
       l.status,
       l.archived,
       lf.type,
       lf.is_recovery_dest_file as rdf,
       l.sequence#,
       l.first_change#,
       l.next_change#,
       l.first_time,
       l.next_time
from   v$logfile lf , v$log l
where  l.group# = lf.group#
order by l.thread#,lf.group#, lf.member;




====================

For standby logs ...

set linesize 200 pagesize 200
column member            format a50
column first_change#   format 99999999999999999999
column next_change#   format 99999999999999999999
select l.thread#,
       lf.group#,
       lf.member,
       trunc(l.bytes/1024/1024) as size_mb,
       l.status,
       l.archived,
       lf.type,
       lf.is_recovery_dest_file as rdf,
       l.sequence#,
       l.first_change#,
       l.next_change#   
from   v$logfile lf , v$standby_log l
where  l.group# = lf.group#
order by l.thread#,lf.group#, lf.member;



select * from v$logfile where type='STANDBY';


set linesize 300 pagesize 200
column member            format a70
column first_change#   format 99999999999999999999
column next_change#   format 99999999999999999999
select l.thread#,
       lf.group#,
       lf.member,
       trunc(l.bytes/1024/1024) as size_mb,
       l.status,
       l.archived,
       lf.type,
       lf.is_recovery_dest_file as rdf,
       l.sequence#,
       l.first_change#,
       l.next_change#   
from   v$logfile lf , v$log l
where  l.group# = lf.group#
union 
select l.thread#,
       lf.group#,
       lf.member,
       trunc(l.bytes/1024/1024) as size_mb,
       l.status,
       l.archived,
       lf.type,
       lf.is_recovery_dest_file as rdf,
       l.sequence#,
       l.first_change#,
       l.next_change#   
from   v$logfile lf , v$standby_log l
where  l.group# = lf.group#
order by 7;

======
To generate ADD STANDBY LOGFILE statements from the existing online redo logs:

select 'alter database add standby logfile '''||regexp_substr(MEMBER,'/.+/')||'stdby_'||regexp_replace(member,regexp_substr(MEMBER,'/.+/'),'')||''' size '||bytes||';' "Create Standby redo" from v$logfile lf , v$log l
where l.group# = lf.group#
union all
select 'alter database add standby logfile '''||regexp_substr(MEMBER,'/.+/')||'stdby_redo0'||(select max(group#)+1 from v$log)||'.rdo'||''' size '||bytes||';' "Create Standby redo" from v$logfile lf , v$log l
where l.group# = lf.group#
and rownum <=2
/
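
This produces one ADD STANDBY LOGFILE statement per online log member, plus a couple of extra statements (the rownum <= 2 branch), which fits the usual Data Guard guideline of having at least one more standby redo log group per thread than there are online groups, each the same size as the online logs.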

==============



set linesize 300 pagesize 300
COL GROUP#       FORMAT 999999 HEAD 'Group'
COL THREAD#        FORMAT 999999 HEAD 'Thread'
COL SEQUENCE#       FORMAT 99999999 HEAD 'Sequence'
COL sizeMb              FORMAT A6 HEAD 'Size|Mb'
COL STATUS        FORMAT A10
COL MEMBER        FORMAT A50
COL switch_dt        FORMAT A20
COL ARCHIVED        FORMAT A7
BREAK ON THREAD# NODUP ON GROUP# NODUP SKIP 1 ON sizeMb NODUP ON switch_dt NODUP
COMPUTE NUMBER LABEL 'MEMBER:' OF SEQUENCE# ON GROUP#

SELECT L.GROUP#, L.THREAD#, L.SEQUENCE#, LPAD(TRUNC(BYTES/1024/1024),5) sizeMb, L.STATUS, MEMBER, TO_CHAR(FIRST_TIME, 'dd-mm-yyyy HH24:MI:SS') switch_dt,
DECODE(ARCHIVED,'YES','Done..','Non') ARCHIVED
FROM V$LOG L, V$LOGFILE F
WHERE L.GROUP# = F.GROUP#
ORDER BY 1,3,6;

=============

Dropping an online log group that is still needed for crash recovery fails with ORA-01624 / ORA-00312:

 alter database drop logfile group 2;
alter database drop logfile group 2
*
ERROR at line 1:
ORA-01624: log 2 needed for crash recovery of instance vihcdbd8 (thread 1)
ORA-00312: online log 2 thread 1: '/dumps/vihcdbd8_DATABASE/logfile_2'


 alter system checkpoint;   -- force a checkpoint so the group is no longer needed for crash recovery

System altered.

then clear the group (after which it can be dropped):

 alter database clear logfile group 2;

Database altered.


-- clear every online redo log group (will fail for a group that is still CURRENT or ACTIVE)
begin
  for log_cur in ( select group# group_no from v$log )
  loop
    execute immediate 'alter database clear logfile group '||log_cur.group_no;
  end loop;
end;
/

To generate RENAME FILE statements (for moving the log files to the directory given by &1):

define 1='/u01/app/oracle/Datafile/'

set linesize 200
 col file_name for a150
 select 'ALTER DATABASE RENAME FILE ''' ||member|| ''' to '||'''&1'|| substr(member,instr(member,'/',-1)+1, instr(substr(member,instr(member,'/',-1)+1),'.')-1 )||'.log' ||''' ;' file_name
from V$logfile;
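
Each generated line looks roughly like this (the source path here is illustrative):

ALTER DATABASE RENAME FILE '/old/path/redo01.log' to '/u01/app/oracle/Datafile/redo01.log' ;

The rename only updates the control file, so copy/move the files at OS level first and run the statements with the database mounted but not open.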


====


-- standby log only 

set lines 200 pages 9999
col thread# for 9999990
col sequence# for 999999990
col grp for 990
col fnm for a50 head "File Name"
col "First SCN Number" for 999999999999990
break on thread#

select a.thread#
,a.sequence#
,a.group# grp     
,a.bytes/1024/1024 Size_MB     
,a.status     
,a.archived     
,a.first_change# "First SCN Number"     
,to_char(FIRST_TIME,'DD-Mon-yyyy HH24:MI:SS') "First SCN Time"   
,to_char(LAST_TIME,'DD-Mon-yyyy HH24:MI:SS') "Last SCN Time"  from
v$standby_log a  order by 1,2,3,4
 /

alter database add standby logfile  group 15 '+DATA' size 1024m;

alter database add standby logfile  group 16 '+DATA' size 1024m;


 THREAD#  SEQUENCE#  GRP    SIZE_MB STATUS       ARC First SCN Number First SCN Time                Last SCN Time
-------- ---------- ---- ---------- ------------ --- ---------------- ----------------------------- -----------------------------
       0          0   15       1024 UNASSIGNED   YES
       0          0   16       1024 UNASSIGNED   YES

SQL> ALTER DATABASE DROP STANDBY LOGFILE GROUP 15;

Database altered.

SQL> ALTER DATABASE DROP STANDBY LOGFILE GROUP 16;

Database altered.


Even on a single-instance database, specify the thread number in the ADD STANDBY LOGFILE statement; without it the groups are created with thread# 0, as shown above.

With thread:
SQL> alter database add standby logfile thread 1 group 17 '+DATA' size 1048m;

Database altered.


alter database add standby logfile thread 1 group 16 '+DATA' size 1048m;

Database altered.

Start the database in mount stage, clear the unarchived log file, then open the database again:

ALTER DATABASE CLEAR UNARCHIVED LOGFILE GROUP 2;
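
As a complete SQL*Plus sequence (a minimal sketch, assuming group 2 is the affected group):

SQL> startup mount
SQL> alter database clear unarchived logfile group 2;
SQL> alter database open;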

Check the status 

set linesize 500
col "redo file name" for a60
SELECT group#,l.thread#,l.sequence#,l.archived,l.status,V.MEMBER "redo file name",bytes/1024/1024 "size in MB"
FROM V$LOG L JOIN V$LOGFILE V USING (GROUP#) ORDER BY GROUP#;


===
ORA-01275: Operation ADD LOGFILE is not allowed if standby file management is automatic

alter system set standby_file_management=MANUAL;
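
Once the log file maintenance is finished, standby_file_management is normally set back to automatic:

alter system set standby_file_management=AUTO;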




Redo size recommendation


set linesize 300

-- minutes between consecutive log switches (v$log_history) in the chosen window
select
   b.recid,
   to_char(b.first_time,'dd-mon-yy hh24:mi:ss') start_time,
   a.recid,
   to_char(a.first_time,'dd-mon-yy hh24:mi:ss') end_time,
   round(((a.first_time-b.first_time)*24)*60,2) minutes
from
   v$log_history a,  v$log_history b
where   a.recid = b.recid+1
and     a.first_time between to_date('2024-05-29:08:00:00','yyyy-mm-dd:hh24:mi:ss')
                         and to_date('2024-05-29:17:00:00','yyyy-mm-dd:hh24:mi:ss')
order by a.first_time asc;
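
The next query takes the last 7 days of the 'Redo Generated Per Sec' metric from dba_hist_sysmetric_history, averages the top 30 samples per day, and multiplies that rate by 60*20, i.e. it suggests an online redo log size big enough to hold roughly 20 minutes of redo at that rate.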


select time "Day",
       round(max(value/1024),2)       "Max_Redo_KB/s",
       round(avg(value)/1024,2)       "Avg_Redo(top30_mins)_KB/s",
       round(avg(value)*60*20/1024,0) "Recommended_ORL_size_KB"
from (
  with agg as (
    select to_char(BEGIN_TIME,'DD/MON/YYYY') time, value,
           dense_rank() over (partition by to_char(BEGIN_TIME,'DD/MON/YYYY') order by value desc) as rank
    from dba_hist_sysmetric_history
    where metric_name = 'Redo Generated Per Sec'
    and   BEGIN_TIME > sysdate - 7
  )
  select time, value from agg where agg.rank < 31
)
group by time
order by 1 desc;



====

Online redo log members with their mirror side (x$kccfn):




col fnfno head 'REDO GROUP'
col fnnam head 'LOGFILE' format a60
set linesize 200 trimspool on
set pagesize 100

col MEMBER format a95

clear break
break on thread# skip 1 on group# skip 1

with mirrors as (
select
--inst_id
fnfno
  --, FNFWD
--, lpad(radix.to_bin(FNFWD),8,'0') FNFWD_bin
  --, FNBWD
--, lpad(radix.to_bin(FNBWD),8,'0') FNBWD_bin
, decode(FNBWD,0,'Side 1','Side 2') mirror_side
--, decode(bitand(fn flg,8),0,' ONLINE','STANDBY')
, fnnam
--, decode(bitand(fnflg, 32),0,'NO','YES')
from x$kccfn
where fnnam is not null
and fntyp=3
), 
logfiles as(
select
--l.inst_id,
l.thread#,
l.group#,
sequence#,
bytes/1024/1024/1024 gb,
member,
l.status group_status,
f.status member_status,
l.archived,
to_char(first_time,'yyyy-mm-dd hh24:mi:ss') first_time
from v$log l, v$logfile f
where l.group# = f.group#
--and l.inst_id = f.inst_id
order by thread#,group#
)
select
l.thread#
, l.group#
, m.mirror_side
, l.sequence#
, l.gb
, l.member
, l.group_status
, l.member_status
, l.archived
from mirrors m
join logfiles l on l.group# = m.fnfno
and l.member = m.fnnam
order by l.thread#, l.group#, m.mirror_side
/
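
x$kccfn is a fixed table that is only visible when connected as SYS (unless a view over it has been created and granted), and the commented-out radix.to_bin calls point to a non-standard helper package, which is why they are left disabled here.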
