Saturday, September 26, 2009

EXTRACT extracts and returns the value of a specified datetime field from a datetime or interval expression.

-- Values That Can Be Extracted:

-- YEAR
-- MONTH
-- DAY
-- HOUR
-- MINUTE
-- SECOND
-- TIMEZONE_HOUR
-- TIMEZONE_MINUTE
-- TIMEZONE_REGION
-- TIMEZONE_ABBR

EXTRACT ( datetime_field FROM expr )
SELECT EXTRACT(YEAR FROM DATE '2005-01-04') FROM dual;
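
Note that HOUR, MINUTE and SECOND can only be extracted from TIMESTAMP values (and the TIMEZONE_* fields only from TIMESTAMP WITH TIME ZONE); extracting them from a plain DATE raises ORA-30076. A quick check against SYSTIMESTAMP:

SELECT EXTRACT(HOUR FROM SYSTIMESTAMP) hh,
       EXTRACT(MINUTE FROM SYSTIMESTAMP) mi,
       EXTRACT(SECOND FROM SYSTIMESTAMP) ss
FROM dual;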

CREATE TABLE extract_test (
person_id NUMBER(3),
hire_date TIMESTAMP WITH TIME ZONE)
TABLESPACE data_sml;

INSERT INTO extract_test VALUES (100, SYSTIMESTAMP+1);
INSERT INTO extract_test VALUES (200, SYSTIMESTAMP+34);

SELECT EXTRACT(TIMEZONE_HOUR FROM hire_date)
FROM extract_test;

ANSI date-time datatypes and arithmetic

ALTER session SET nls_date_format = 'YYYY-MM-DD';

-- old
SELECT ADD_MONTHS(TO_DATE('2003-07-31','YYYY-MM-DD'),-1) FROM dual;

ADD_MONTHS
----------
2003-06-30

-- new
SELECT TO_TIMESTAMP('2003-07-31','YYYY-MM-DD') - INTERVAL '1' MONTH FROM dual;

ERROR AT line 1:
ORA-01839: DATE NOT valid FOR MONTH specified

-- old
SELECT TO_DATE('2003-07-31','YYYY-MM-DD') - 100 FROM dual;

-- new (broken)
SELECT TO_TIMESTAMP('2003-07-31','YYYY-MM-DD') - INTERVAL '100' DAY FROM dual;

ERROR AT line 1:
ORA-01873: the leading precision OF the INTERVAL IS too small

-- new (note the extra "(3)")
SELECT TO_TIMESTAMP('2003-07-31','YYYY-MM-DD') - INTERVAL '100' DAY(3) FROM dual;

TO_TIMESTAMP('2003-07-31','YYYY-MM-DD')-INTERVAL'100'DAY(3)
-------------------------------------------------------------
2003-04-22 00:00:00

Get seconds between dates

CREATE OR REPLACE FUNCTION time_diff (
DATE_1 IN DATE, DATE_2 IN DATE) RETURN NUMBER IS

NDATE_1 NUMBER;
NDATE_2 NUMBER;
NSECOND_1 NUMBER(5,0);
NSECOND_2 NUMBER(5,0);

BEGIN
-- Get Julian date number from first date (DATE_1)
NDATE_1 := TO_NUMBER(TO_CHAR(DATE_1, 'J'));

-- Get Julian date number from second date (DATE_2)
NDATE_2 := TO_NUMBER(TO_CHAR(DATE_2, 'J'));

-- Get seconds since midnight from first date (DATE_1)
NSECOND_1 := TO_NUMBER(TO_CHAR(DATE_1, 'SSSSS'));

-- Get seconds since midnight from second date (DATE_2)
NSECOND_2 := TO_NUMBER(TO_CHAR(DATE_2, 'SSSSS'));

RETURN (((NDATE_2 - NDATE_1) * 86400)+(NSECOND_2 - NSECOND_1));
END time_diff;
/
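
For plain DATE values the same figure can also be obtained without the Julian/SSSSS arithmetic, since subtracting two DATEs yields the difference in days; for example:

-- seconds between two dates = difference in days * 86400
SELECT (SYSDATE - TO_DATE('2009-09-01 10:30:00','YYYY-MM-DD HH24:MI:SS')) * 86400 AS seconds_diff
FROM dual;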

information about current database

SET termout off
store SET store rep
SET head off
SET pause off
SET termout ON

SELECT 'User: '|| USER || ' on database ' || global_name,
' (term='||USERENV('TERMINAL')||
', audsid='||USERENV('SESSIONID')||')' AS MYCONTEXT
FROM global_name;

@store
SET termout ON

Audit User Logins

CONNECT / AS SYSDBA

DROP TABLE user_login_audit;

CREATE TABLE user_login_audit (
login_time DATE,
username VARCHAR2(30),
machine VARCHAR2(30),
command VARCHAR2(128)
);

CREATE OR REPLACE TRIGGER user_login_trig
AFTER LOGON ON scott.SCHEMA
DECLARE
v_username sys.v_$session.username%TYPE;
v_machine sys.v_$session.machine%TYPE;
v_command sys.v_$session.command%TYPE;
BEGIN
SELECT username, machine, command
INTO v_username, v_machine, v_command
FROM sys.v_$session
WHERE audsid = USERENV('SESSIONID')
AND audsid != 0 -- Don't Check SYS Connections
AND ROWNUM = 1; -- Parallel processes will have the same AUDSID's

INSERT INTO sys.user_login_audit
VALUES (SYSDATE, v_username, v_machine, v_command);

IF UPPER(v_machine) LIKE '%PC1%' THEN -- Start SQL trace for users from PC1
DBMS_SESSION.set_sql_trace(TRUE);
END IF;
END;
/
SHOW ERRORS

CONNECT SCOTT/TIGER

CONNECT / AS SYSDBA

SELECT * FROM sys.user_login_audit;

Block TOAD and other tools

CONNECT / AS SYSDBA;

CREATE OR REPLACE TRIGGER block_tools_from_prod
AFTER LOGON ON DATABASE
DECLARE
v_prog sys.v_$session.program%TYPE;
BEGIN
SELECT program INTO v_prog
FROM sys.v_$session
WHERE audsid = USERENV('SESSIONID')
AND audsid != 0 -- Don't Check SYS Connections
AND ROWNUM = 1; -- Parallel processes will have the same AUDSID's

IF UPPER(v_prog) LIKE '%TOAD%' OR UPPER(v_prog) LIKE '%T.O.A.D%' OR -- Toad
UPPER(v_prog) LIKE '%SQLNAV%' OR -- SQL Navigator
UPPER(v_prog) LIKE '%PLSQLDEV%' OR -- PLSQL Developer
UPPER(v_prog) LIKE '%BUSOBJ%' OR -- Business Objects
UPPER(v_prog) LIKE '%EXCEL%' -- MS-Excel plug-in
THEN
RAISE_APPLICATION_ERROR(-20000, 'Development tools are not allowed here.');
END IF;
END;
/
SHOW ERRORS

user connection details in oracle with SYS_CONTEXT

SET serveroutput on size 500000
SET feedback off

DECLARE
CURSOR c_ctx IS
SELECT SYS_CONTEXT ('USERENV', 'TERMINAL') terminal,
SYS_CONTEXT ('USERENV', 'LANGUAGE') LANGUAGE,
SYS_CONTEXT ('USERENV', 'SESSIONID') sessionid,
SYS_CONTEXT ('USERENV', 'INSTANCE') INSTANCE,
SYS_CONTEXT ('USERENV', 'ENTRYID') entryid,
SYS_CONTEXT ('USERENV', 'ISDBA') isdba,
SYS_CONTEXT ('USERENV', 'NLS_TERRITORY') nls_territory,
SYS_CONTEXT ('USERENV', 'NLS_CURRENCY') nls_currency,
SYS_CONTEXT ('USERENV', 'NLS_CALENDAR') nls_calendar,
SYS_CONTEXT ('USERENV', 'NLS_DATE_FORMAT') nls_date_format,
SYS_CONTEXT ('USERENV', 'NLS_DATE_LANGUAGE') nls_date_language,
SYS_CONTEXT ('USERENV', 'NLS_SORT') nls_sort,
SYS_CONTEXT ('USERENV', 'CURRENT_USER') CURRENT_USER,
SYS_CONTEXT ('USERENV', 'CURRENT_USERID') current_userid,
SYS_CONTEXT ('USERENV', 'SESSION_USER') session_user,
SYS_CONTEXT ('USERENV', 'SESSION_USERID') session_userid,
SYS_CONTEXT ('USERENV', 'PROXY_USER') proxy_user,
SYS_CONTEXT ('USERENV', 'PROXY_USERID') proxy_userid,
SYS_CONTEXT ('USERENV', 'DB_DOMAIN') db_domain,
SYS_CONTEXT ('USERENV', 'DB_NAME') db_name,
SYS_CONTEXT ('USERENV', 'HOST') HOST,
SYS_CONTEXT ('USERENV', 'OS_USER') os_user,
SYS_CONTEXT ('USERENV', 'EXTERNAL_NAME') external_name,
SYS_CONTEXT ('USERENV', 'IP_ADDRESS') ip_address,
SYS_CONTEXT ('USERENV', 'NETWORK_PROTOCOL') network_protocol,
SYS_CONTEXT ('USERENV', 'BG_JOB_ID') bg_job_id,
SYS_CONTEXT ('USERENV', 'FG_JOB_ID') fg_job_id,
SYS_CONTEXT ('USERENV',
'AUTHENTICATION_TYPE'
) authentication_type,
SYS_CONTEXT ('USERENV',
'AUTHENTICATION_DATA'
) authentication_data,
SYS_CONTEXT ('USERENV', 'CURRENT_SQL') current_sql,
SYS_CONTEXT ('USERENV', 'CLIENT_IDENTIFIER') client_identifier,
SYS_CONTEXT ('USERENV',
'GLOBAL_CONTEXT_MEMORY'
) global_context_memory
FROM DUAL;

ctx c_ctx%ROWTYPE;

-- Utility Functions
FUNCTION pad (s VARCHAR2, padsize NUMBER := 25, padchar VARCHAR2 := ' ')
RETURN VARCHAR2 IS
BEGIN
RETURN RPAD (s, padsize, padchar);
END pad;

------------
PROCEDURE PRINT (s VARCHAR2) IS
BEGIN
DBMS_OUTPUT.put_line (s);
END PRINT;

----------------
PROCEDURE newln IS
BEGIN
PRINT (CHR (10));
END newln;
BEGIN
OPEN c_ctx;

FETCH c_ctx
INTO ctx;

CLOSE c_ctx;

newln;
PRINT ('Connection Context');
PRINT ('==================');
newln;
PRINT ('User Details');
PRINT ('------------');
PRINT (pad ('Session ID: ') || ctx.sessionid);
PRINT ( pad ('Session User ID: ')
|| ctx.session_userid
|| ' / '
|| ctx.session_user
);
PRINT ( pad ('Current User ID: ')
|| ctx.current_userid
|| ' / '
|| ctx.CURRENT_USER
);
PRINT (pad ('Is DBA: ') || ctx.isdba);
newln;
PRINT ('Machine/Network Details');
PRINT ('-----------------------');
PRINT (pad ('Terminal: ') || ctx.terminal);
PRINT (pad ('Host: ') || ctx.HOST);
PRINT (pad ('OS User: ') || ctx.os_user);
PRINT (pad ('IP Address: ') || ctx.ip_address);
PRINT (pad ('Network Protocol: ') || ctx.network_protocol);
PRINT (pad ('Authentication Type: ') || ctx.authentication_type);
newln;
PRINT ('NLS Settings');
PRINT ('------------');
PRINT (pad ('NLS Territory: ') || ctx.nls_territory);
PRINT (pad ('NLS Currency: ') || ctx.nls_currency);
PRINT (pad ('NLS Date Format: ') || ctx.nls_date_format);
PRINT (pad ('NLS Date Language: ') || ctx.nls_date_language);
PRINT (pad ('NLS Sort: ') || ctx.nls_sort);
newln;
PRINT ('Database');
PRINT ('--------');
PRINT (pad ('DB Name: ') || ctx.db_name);
PRINT (pad ('DB Domain: ') || ctx.db_domain);
PRINT (pad ('Language: ') || ctx.LANGUAGE);
newln;
END;
/

Use SYS_CONTEXT to Obtain Session Information

select sid from v$session where audsid=sys_context('userenv','SESSIONID');

SYS_CONTEXT

USERENV is an Oracle provided namespace that describes the current session.

Syntax

SELECT sys_context('<namespace>', '<parameter>' [, <length>])
FROM dual;

ACTION
------
Identifies the position in the module (application name); it is set through the DBMS_APPLICATION_INFO package or OCI.

SELECT sys_context('USERENV', 'ACTION') FROM dual;

exec dbms_application_info.set_action('INSERTING');

SELECT sys_context('USERENV', 'ACTION') FROM dual;

1. AUDITED_CURSORID
--------------------

Returns the cursor ID of the SQL that triggered the audit. This parameter is not valid in a fine-grained auditing environment. If you specify it in such an environment, Oracle Database always returns NULL.

2. AUTHENTICATED_IDENTITY
-------------------------

Returns the identity used in authentication. In the list that follows, the type of user is followed by the value returned:
Kerberos-authenticated enterprise user: kerberos principal name
Kerberos-authenticated external user : kerberos principal name; same as the schema name
SSL-authenticated enterprise user: the DN in the user's PKI certificate
SSL-authenticated external user: the DN in the user's PKI certificate
Password-authenticated enterprise user: nickname; same as the login name
Password-authenticated database user: the database username; same as the schema name
OS-authenticated external user: the external operating system user name
Radius/DCE-authenticated external user: the schema name
Proxy with DN : Oracle Internet Directory DN of the client
Proxy with certificate: certificate DN of the client
Proxy with username: database user name if client is a local database user; nickname if client is an enterprise user
SYSDBA/SYSOPER using Password File: login name
SYSDBA/SYSOPER using OS authentication: operating system user name

3. AUTHENTICATION_DATA
----------------------
Data being used to authenticate the login user. For X.509 certificate authenticated sessions, this field returns the context of the certificate in HEX2 format.

Note: You can change the return value of the AUTHENTICATION_DATA attribute using the length parameter of the syntax. Values of up to 4000 are accepted. This is the only attribute of USERENV for which Oracle implements such a change.

4. AUTHENTICATION_METHOD
----------------------

Returns the method of authentication. In the list that follows, the type of user is followed by the method returned.
Password-authenticated enterprise user, local database user, or SYSDBA/SYSOPER using Password File; proxy with username using password: PASSWORD
Kerberos-authenticated enterprise or external user: KERBEROS
SSL-authenticated enterprise or external user: SSL
Radius-authenticated external user: RADIUS
OS-authenticated external user or SYSDBA/SYSOPER: OS
DCE-authenticated external user: DCE
Proxy with certificate, DN, or username without using password: NONE

5. BG_JOB_ID
------------

Job ID of the current session if it was established by an Oracle background process. Null if the session was not established by a background process.

6. CLIENT_IDENTIFIER
----------------------
Returns an identifier that is set by the application through the DBMS_SESSION.SET_IDENTIFIER procedure, the OCI attribute OCI_ATTR_CLIENT_IDENTIFIER, or the Java class Oracle.jdbc.OracleConnection.setClientIdentifier. This attribute is used by various database components to identify lightweight application users who authenticate as the same user.
SELECT sys_context('USERENV', 'CLIENT_IDENTIFIER') FROM dual;

exec dbms_session.set_identifier(USER || ' ' || SYSTIMESTAMP);

SELECT sys_context('USERENV', 'CLIENT_IDENTIFIER') FROM dual;

7. CLIENT_INFO
--------------
Returns user session information that can be stored by an application using the DBMS_APPLICATION_INFO package.
SELECT sys_context('USERENV', 'CLIENT_INFO') FROM dual;

exec dbms_application_info.set_client_info('TEST');

SELECT sys_context('USERENV', 'CLIENT_INFO') FROM dual;

8. CURRENT_BIND
---------------
The bind variables for fine-grained auditing

9. CURRENT_EDITION_ID
-------------------
The numeric identifier of the current edition
SELECT sys_context('USERENV', 'CURRENT_EDITION_ID') FROM dual;

10. CURRENT_EDITION_NAME
---------------------
The name of the current edition
SELECT sys_context('USERENV', 'CURRENT_EDITION_NAME') FROM dual;

11. CURRENT_SCHEMA
-------------
Name of the default schema being used in the current session. This value can be changed during the session with an ALTER SESSION SET CURRENT_SCHEMA statement.

SELECT sys_context('USERENV', 'CURRENT_SCHEMA') FROM dual;
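
A quick illustration (assuming the SCOTT schema exists): CURRENT_SCHEMA follows ALTER SESSION SET CURRENT_SCHEMA, while SESSION_USER stays the same:

ALTER SESSION SET CURRENT_SCHEMA = scott;
SELECT sys_context('USERENV', 'CURRENT_SCHEMA') FROM dual;   -- now SCOTT
SELECT sys_context('USERENV', 'SESSION_USER') FROM dual;     -- unchanged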

12. CURRENT_SCHEMAID
----------------
Identifier of the default schema being used in the current session.
SELECT sys_context('USERENV', 'CURRENT_SCHEMAID') FROM dual;

SELECT user#
FROM sys.user$
WHERE name = USER;

13. CURRENT_SQL
--------------
Returns the first 4K bytes of the current SQL that triggered the fine-grained auditing event.

14. CURRENT_SQLn
-------------
CURRENT_SQLn attributes return subsequent 4K-byte increments, where n can be an integer from 1 to 7, inclusive. CURRENT_SQL1 returns bytes 4K to 8K; CURRENT_SQL2 returns bytes 8K to 12K, and so forth. You can specify these attributes only inside the event handler for the fine-grained auditing feature.

15. CURRENT_SQL_LENGTH
----------------------
The length of the current SQL statement that triggers fine-grained audit or row-level security (RLS) policy functions or event handlers. Valid only inside the function or event handler.
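
As a minimal sketch of where these CURRENT_SQL* attributes can be read, here is a fine-grained auditing handler; the fga_sql_log table and the policy details are made up for illustration:

CREATE TABLE fga_sql_log (logged_at DATE, sql_len NUMBER, sql_text VARCHAR2(4000));

CREATE OR REPLACE PROCEDURE fga_handler (
  object_schema VARCHAR2, object_name VARCHAR2, policy_name VARCHAR2) IS
  PRAGMA AUTONOMOUS_TRANSACTION;
BEGIN
  -- CURRENT_SQL / CURRENT_SQL_LENGTH are only populated inside the FGA handler
  INSERT INTO fga_sql_log
  VALUES (SYSDATE,
          SYS_CONTEXT('USERENV', 'CURRENT_SQL_LENGTH'),
          SYS_CONTEXT('USERENV', 'CURRENT_SQL'));
  COMMIT;
END;
/

BEGIN
  DBMS_FGA.ADD_POLICY(object_schema  => 'SCOTT',
                      object_name    => 'EMP',
                      policy_name    => 'EMP_SAL_POLICY',
                      audit_column   => 'SAL',
                      handler_schema => 'SCOTT',
                      handler_module => 'FGA_HANDLER');
END;
/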

16. DB_DOMAIN
-------------
Domain of the database as specified in the DB_DOMAIN initialization parameter.
SELECT sys_context('USERENV', 'DB_DOMAIN') FROM dual;

17. DB_NAME
------------
Name of the database as specified in the DB_NAME initialization parameter.
SELECT sys_context('USERENV', 'DB_NAME') FROM dual;

SELECT name, value
FROM gv$parameter
where name LIKE 'db%name';

18. DB_UNIQUE_NAME
-------------
Name of the database as specified in the DB_UNIQUE_NAME initialization parameter.
SELECT sys_context('USERENV', 'DB_UNIQUE_NAME') FROM dual;

SELECT name, value
FROM gv$parameter
where name LIKE 'db%name';

19. ENTRYID
-----------
The available auditing entry identifier. You cannot use this option in distributed SQL statements. To use this keyword in USERENV, the initialization parameter AUDIT_TRAIL must be set to true.

20. ENTERPRISE_IDENTITY
--------------------
Returns the user's enterprise-wide identity:
For enterprise users: the Oracle Internet Directory DN.
For external users: the external identity (Kerberos principal name, Radius and DCE schema names, OS user name, Certificate DN).
For local users and SYSDBA/SYSOPER logins: NULL.
The value of the attribute differs by proxy method:
For a proxy with DN: the Oracle Internet Directory DN of the client
For a proxy with certificate: the certificate DN of the client for external users; the Oracle Internet Directory DN for global users
For a proxy with username: the Oracle Internet Directory DN if the client is an enterprise user; NULL if the client is a local database user.

21. FG_JOB_ID
---------
Job ID of the current session if it was established by a client foreground process. Null if the session was not established by a foreground process.

22. GLOBAL_CONTEXT_MEMORY
----------------------
The number used in the System Global Area by the globally accessed context.

SELECT sys_context('USERENV', 'GLOBAL_CONTEXT_MEMORY') FROM dual;

23. GLOBAL_UID
----------
Returns the global user ID from Oracle Internet Directory for Enterprise User Security (EUS) logins; returns null for all other logins.

24. HOST
--------
Name of the host machine from which the client has connected.
SELECT sys_context('USERENV', 'HOST') FROM dual;

25. IDENTIFICATION_TYPE
----------------------
Returns the way the user's schema was created in the database. Specifically, it reflects the IDENTIFIED clause in the CREATE/ALTER USER syntax. In the list that follows, the syntax used during schema creation is followed by the identification type returned:
IDENTIFIED BY password: LOCAL
IDENTIFIED EXTERNALLY: EXTERNAL
IDENTIFIED GLOBALLY: GLOBAL SHARED
IDENTIFIED GLOBALLY AS DN: GLOBAL PRIVATE

SELECT sys_context('USERENV', 'IDENTIFICATION_TYPE') FROM dual;

26. INSTANCE
--------
The instance identification number of the current instance.
SELECT sys_context('USERENV', 'INSTANCE') FROM dual;

27. INSTANCE_NAME
----------------
The name of the instance.
SELECT sys_context('USERENV', 'INSTANCE_NAME') FROM dual;

28. IP_ADDRESS
-------------
IP address of the machine from which the client is connected.
SELECT sys_context('USERENV', 'IP_ADDRESS') FROM dual;

ISDBA
-----
Returns TRUE if the user has been authenticated as having DBA privileges (for example by connecting AS SYSDBA), either through the operating system or a password file.
SELECT sys_context('USERENV', 'ISDBA') FROM dual;

29. LANG
--------
The ISO abbreviation for the language name, a shorter form than the existing 'LANGUAGE' parameter.
SELECT sys_context('USERENV', 'LANG') FROM dual;

30. LANGUAGE
------------
The language and territory currently used by your session, along with the database character set, in the form:

language_territory.characterset.
SELECT sys_context('USERENV', 'LANGUAGE') FROM dual;

31. MODULE
---------
The application name (module) set through the DBMS_APPLICATION_INFO package or OCI.
SELECT sys_context('USERENV', 'MODULE') FROM dual;

32. NETWORK_PROTOCOL
-------------------
Network protocol being used for communication, as specified in the 'PROTOCOL=protocol' portion of the connect string.

33. NLS_CALENDAR
------------
The current calendar of the current session.
SELECT sys_context('USERENV', 'NLS_CALENDAR') FROM dual;

34. NLS_CURRENCY
------------
The currency of the current session.
SELECT sys_context('USERENV', 'NLS_CURRENCY') FROM dual;

35. NLS_DATE_FORMAT
----------------
The date format for the session.
SELECT sys_context('USERENV', 'NLS_DATE_FORMAT') FROM dual;

36. NLS_DATE_LANGUAGE
-----------------
The language used for expressing dates.
SELECT sys_context('USERENV', 'NLS_DATE_LANGUAGE') FROM dual;

37. NLS_SORT
-----------
BINARY or the linguistic sort basis.
SELECT sys_context('USERENV', 'NLS_SORT') FROM dual;

38. NLS_TERRITORY
----------------
The territory of the current session.
SELECT sys_context('USERENV', 'NLS_TERRITORY') FROM dual;

39. OS_USER
-------
Operating system username of the client process that initiated the database session.
SELECT sys_context('USERENV', 'OS_USER') FROM dual;

40. POLICY_INVOKER
-------------
The invoker of row-level security (RLS) policy functions.

41. PROXY_ENTERPRISE_IDENTITY
-----------------------
Returns the Oracle Internet Directory DN when the proxy user is an enterprise user.

42. PROXY_GLOBAL_UID
---------------
Returns the global user ID from Oracle Internet Directory for Enterprise User Security (EUS) proxy users; returns NULL for all other proxy users.

43. PROXY_USER
-----------
Name of the database user who opened the current session on behalf of SESSION_USER.

44. PROXY_USERID
------------
Identifier of the database user who opened the current session on behalf of SESSION_USER.

45. SERVER_HOST
-----------
The host name of the machine on which the instance is running.
SELECT sys_context('USERENV', 'SERVER_HOST') FROM dual;

46. SERVICE_NAME
------------
The name of the service to which a given session is connected.
SELECT sys_context('USERENV', 'SERVICE_NAME') FROM dual;

47. SESSION_USER
------------
Database user name by which the current user is authenticated. This value remains the same throughout the duration of the session.
SELECT sys_context('USERENV', 'SESSION_USER') FROM dual;

48. SESSION_USERID
--------------
Identifier of the database user name by which the current user is authenticated.
SELECT sys_context('USERENV', 'SESSION_USERID') FROM dual;

49. SESSIONID
---------
The auditing session identifier. You cannot use this option in distributed SQL statements. It is equivalent to the AUDSID column in gv$session.
SELECT sys_context('USERENV', 'SESSIONID') FROM dual;

50. SID
------
The session number (different from the session ID).
SELECT sys_context('USERENV', 'SID') FROM dual;

51. STATEMENTID
-----------
The auditing statement identifier. STATEMENTID represents the number of SQL statements audited in a given session.

52. TERMINAL
-----------
The operating system identifier for the client of the current session. In distributed SQL statements, this option returns the identifier for your local session. In a distributed environment, this is supported only for remote SELECT statements, not for remote INSERT, UPDATE, or DELETE operations. (The return length of this parameter may vary by operating system.)

SELECT sys_context('USERENV', 'TERMINAL') FROM dual;

How to get host IP in oracle

select utl_inaddr.get_host_address(host_name), host_name from
v$instance;



How to get Client IP in oracle

select sys_context('userenv','ip_address')
from dual;


How can I extract XML field data from a CLOB column directly

I want to insert data into a table directly from a CLOB column. I have a long XML document, and I use this approach because I want to keep the XML saved in my database table first.


SQL> CREATE TABLE XML_LOAD_IN
2 (XML_CFILE CLOB
3 );

Table created.

SQL>
SQL> INSERT INTO XML_LOAD_IN ( XML_CFILE)
2 VALUES (
3 '<CCR>
4 <BundleCount>1</BundleCount>
5 <ItemWithinCashLetterCount>2</ItemWithinCashLetterCount>
6 <CashLetterTotalAmount>1500</CashLetterTotalAmount>
7 <ImagesWithinCashLetterCount>2</ImagesWithinCashLetterCount>
8 <ECEInstitutionName>Bank Asia</ECEInstitutionName>
9 <SettlementDate>20090714</SettlementDate>
10 </CCR>');

1 row created.

SQL>
SQL> CREATE TABLE CCR_IN
2 (
3 "BundleCount" NUMBER(6) NOT NULL,
4 "ItemWithinCashLetterCount" NUMBER(8) NOT NULL,
5 "CashLetterTotalAmount" NUMBER(14) NOT NULL,
6 "ImagesWithinCashLetterCount" NUMBER(9),
7 "ECEInstitutionName" VARCHAR2(18 BYTE),
8 "SettlementDate" VARCHAR2(8 BYTE)
9 )
10 /

Table created.


CREATE OR REPLACE PROCEDURE mic.dpr_insert_from_xml_ccr IS
l_bfile BFILE;
l_clob CLOB;
l_parser DBMS_XMLPARSER.parser;
l_doc DBMS_XMLDOM.domdocument;
l_nl DBMS_XMLDOM.domnodelist;
l_n DBMS_XMLDOM.domnode;

TYPE tab_type IS TABLE OF ccr_in%ROWTYPE;

t_tab tab_type := tab_type ();
BEGIN
BEGIN
SELECT xml_cfile
INTO l_clob
FROM xml_load_in;
EXCEPTION
WHEN NO_DATA_FOUND THEN
raise_application_error (-20001, 'Inward XML File Not Found.');
WHEN OTHERS THEN
RAISE;
END;

l_parser := DBMS_XMLPARSER.newparser;
DBMS_XMLPARSER.parseclob (l_parser, l_clob);
l_doc := DBMS_XMLPARSER.getdocument (l_parser);
DBMS_XMLPARSER.freeparser (l_parser);
l_nl :=
DBMS_XSLPROCESSOR.selectnodes (DBMS_XMLDOM.makenode (l_doc), '/CCR');

--- FOR CCR
FOR cur_ccr IN 0 .. DBMS_XMLDOM.getlength (l_nl) - 1 LOOP
l_n := DBMS_XMLDOM.item (l_nl, cur_ccr);
t_tab.EXTEND;
--- Use XPath syntax to assign values to the elements of the collection.
DBMS_XSLPROCESSOR.valueof (l_n,
'BundleCount/text()',
t_tab (t_tab.LAST)."BundleCount"
);
DBMS_XSLPROCESSOR.valueof (l_n,
'ItemWithinCashLetterCount/text()',
t_tab (t_tab.LAST)."ItemWithinCashLetterCount"
);
DBMS_XSLPROCESSOR.valueof (l_n,
'CashLetterTotalAmount/text()',
t_tab (t_tab.LAST)."CashLetterTotalAmount"
);
DBMS_XSLPROCESSOR.valueof
(l_n,
'ImagesWithinCashLetterCount/text()',
t_tab (t_tab.LAST)."ImagesWithinCashLetterCount"
);
DBMS_XSLPROCESSOR.valueof (l_n,
'ECEInstitutionName/text()',
t_tab (t_tab.LAST)."ECEInstitutionName"
);
DBMS_XSLPROCESSOR.valueof (l_n,
'SettlementDate/text()',
t_tab (t_tab.LAST)."SettlementDate"
);
END LOOP;

FOR cur_ccr IN t_tab.FIRST .. t_tab.LAST LOOP
INSERT INTO ccr_in
("BundleCount",
"ItemWithinCashLetterCount",
"CashLetterTotalAmount",
"ImagesWithinCashLetterCount",
"ECEInstitutionName",
"SettlementDate"
)
VALUES (t_tab (cur_ccr)."BundleCount",
t_tab (cur_ccr)."ItemWithinCashLetterCount",
t_tab (cur_ccr)."CashLetterTotalAmount",
t_tab (cur_ccr)."ImagesWithinCashLetterCount",
t_tab (cur_ccr)."ECEInstitutionName",
t_tab (cur_ccr)."SettlementDate"
);
END LOOP;

COMMIT;
DBMS_XMLDOM.freedocument (l_doc);
EXCEPTION
WHEN OTHERS THEN
DBMS_XMLPARSER.freeparser (l_parser);
DBMS_XMLDOM.freedocument (l_doc);
RAISE; -- re-raise so the caller is not left with a silently swallowed error
END;
/

Procedure created.

SQL>
SQL> select * From xml_load_in;

XML_CFILE
--------------------------------------------------------------------------------
<CCR>
<BundleCount>1</BundleCount>
<ItemWithinCashLetterCount>2</ItemWithinCashLetterCount>
SQL>
SQL> exec Dpr_Insert_From_Xml_CCR;

PL/SQL procedure successfully completed.

SQL>
SQL> select * from ccr_in;

BundleCount ItemWithinCashLetterCount CashLetterTotalAmount
----------- ------------------------- ---------------------
ImagesWithinCashLetterCount ECEInstitutionName Settleme
--------------------------- ------------------ --------
1 2 1500
2 Bank Asia 20090714

SQL>

Monday, September 21, 2009

working with BFile content and index

The BFILE locator is stored in the table column, while the file itself stays on disk outside the database.

Steps
=========
0.Create the Oracle directory object

1. Create the table.

2. Create the sequence.

3. Create the index.

4. Create procedure Loadfile_Concurso

5. Load the document with: exec Loadfile_Concurso('/05/24686@Bases de Cotización 27-04.doc', to_date('26/07/2004','dd/MM/YYYY'), '501', 'doc');

6. No errors... when executing any of the previous steps.

7. List the rows on the index with: SELECT count(*) FROM dr$GC_ConcursoDoc_CTX$i

8. No rows... nothing in the index table.


-----------------------------
SCOTT@orcl_11g> -- 0. Create the Oracle directory object:
SCOTT@orcl_11g> CREATE OR REPLACE DIRECTORY GC_ConcursosDocs AS 'c:\oracle11g';

Directory created.

SCOTT@orcl_11g> -- 1. Create the table.
SCOTT@orcl_11g> CREATE TABLE GC_ConcursoDoc_Idx (
2 id NUMBER,
3 nombre_archivo VARCHAR2(255),
4 fecha_upload DATE,
5 filesize VARCHAR2(20),
6 mime VARCHAR2(50),
7 content BFILE,
8 CONSTRAINT doc_pk PRIMARY KEY (id)
9 );

Table created.

SCOTT@orcl_11g> -- 2. Create the sequence.
SCOTT@orcl_11g> CREATE SEQUENCE GC_CONCURSODOC_SEQ
2 START WITH 1 INCREMENT BY 1 MINVALUE 1 NOCACHE NOCYCLE NOORDER;

Sequence created.

SCOTT@orcl_11g> -- 3. Create the index.
SCOTT@orcl_11g> CREATE INDEX GC_ConcursoDoc_CTX ON GC_ConcursoDoc_Idx(content)
2 INDEXTYPE IS CTXSYS.CONTEXT PARAMETERS ('FILTER CTXSYS.AUTO_FILTER SYNC ( ON COMMIT)');

Index created.

SCOTT@orcl_11g> -- 4. Create procedure Loadfile_Concurso.
SCOTT@orcl_11g> CREATE OR REPLACE PROCEDURE Loadfile_Concurso
2 (
3 p_file_name IN GC_ConcursoDoc_Idx.nombre_archivo%TYPE,
4 p_upload_date IN GC_ConcursoDoc_Idx.fecha_upload%TYPE,
5 p_filesize IN GC_ConcursoDoc_Idx.filesize%TYPE,
6 p_mime IN GC_ConcursoDoc_Idx.mime%TYPE
7 )
8 AS
9 index_name varchar2(20) := 'GC_ConcursoDoc_CTX';
10 BEGIN
11 INSERT INTO GC_ConcursoDoc_Idx (id, nombre_archivo, fecha_upload,filesize, mime, content)
12 VALUES (GC_CONCURSODOC_SEQ.NEXTVAL, p_file_name, p_upload_date, p_filesize, p_mime,BFILENAME
13 ('GC_CONCURSOSDOCS',p_file_name));
14 COMMIT;
15 END;
16 /

Procedure created.

SCOTT@orcl_11g> SHOW ERRORS
No errors.
SCOTT@orcl_11g> -- 5. Load with short file name.
SCOTT@orcl_11g> exec Loadfile_Concurso('24686O~1.DOC', to_date('26/07/2004','dd/MM/YYYY'), '501', 'doc');

PL/SQL procedure successfully completed.

SCOTT@orcl_11g> -- list rows:
SCOTT@orcl_11g> SELECT count(*) FROM dr$GC_ConcursoDoc_CTX$i;

COUNT(*)
----------
2

SCOTT@orcl_11g> -- list errors:
SCOTT@orcl_11g> SELECT err_index_name, err_timestamp,err_text FROM ctx_user_index_errors ORDER BY err_timestamp;

no rows selected

SCOTT@orcl_11g>
SCOTT@orcl_11g> -- 5. Repeat of step 5 with long file name.
SCOTT@orcl_11g> exec Loadfile_Concurso('24686@Bases de Cotización 27-04.doc', to_date('26/07/2004','dd/MM/YYYY'), '501', 'doc');

PL/SQL procedure successfully completed.

SCOTT@orcl_11g> -- list rows:
SCOTT@orcl_11g> SELECT count(*) FROM dr$GC_ConcursoDoc_CTX$i;

COUNT(*)
----------
2

SCOTT@orcl_11g> -- list errors:
SCOTT@orcl_11g> SELECT err_index_name, err_timestamp,err_text FROM ctx_user_index_errors ORDER BY err_timestamp;

ERR_INDEX_NAME ERR_TIMEST
------------------------------ ----------
ERR_TEXT
--------------------------------------------------------------------------------
GC_CONCURSODOC_CTX 03/11/2008
DRG-11101: failed to open file 24686@Bases de Cotización 27-04.doc

[There are various problems with long file names with spaces in them on Windows systems. It is better to use the short name that you see with dir/x, usually the first 6 characters, followed by a tilde, followed by a number, then the extension.]

==========================================================
============Another modified Example======================
==========================================================
SCOTT@orcl_11g> HOST DIR c:\oracle11g\2*.doc/x
...
Directory of c:\oracle11g

10/31/2008 02:09 PM 22 24686O~1.DOC 24686orclBases de Cotización 27-04.doc
1 File(s) 22 bytes
0 Dir(s) 62,669,983,744 bytes free

SCOTT@orcl_11g>

-----------------------------------------------------------------------
SCOTT@orcl_11g> CREATE TABLE GC_ConcursoDoc_Idx (
2 id NUMBER,
3 nombre_archivo VARCHAR2(255),
4 fecha_upload DATE,
5 filesize VARCHAR2(20),
6 mime VARCHAR2(50),
7 content BFILE,
8 CONSTRAINT doc_pk PRIMARY KEY (id)
9 );

Table created.

SCOTT@orcl_11g> CREATE OR REPLACE DIRECTORY GC_ConcursosDocs AS 'c:\oracle11g';

Directory created.

SCOTT@orcl_11g> CREATE INDEX GC_ConcursoDoc_CTX ON GC_ConcursoDoc_Idx(content)
2 INDEXTYPE IS CTXSYS.CONTEXT PARAMETERS ('FILTER CTXSYS.AUTO_FILTER SYNC ( ON COMMIT)');

Index created.

SCOTT@orcl_11g> CREATE SEQUENCE GC_CONCURSODOC_SEQ
2 START WITH 1 INCREMENT BY 1 MINVALUE 1 NOCACHE NOCYCLE NOORDER;

Sequence created.

SCOTT@orcl_11g> CREATE OR REPLACE PROCEDURE Loadfile_Concurso
2 (
3 p_file_name IN GC_ConcursoDoc_Idx.nombre_archivo%TYPE,
4 p_upload_date IN GC_ConcursoDoc_Idx.fecha_upload%TYPE,
5 p_filesize IN GC_ConcursoDoc_Idx.filesize%TYPE,
6 p_mime IN GC_ConcursoDoc_Idx.mime%TYPE
7 )
8 AS
9 index_name varchar2(20) := 'GC_ConcursoDoc_CTX';
10 BEGIN
11 INSERT INTO GC_ConcursoDoc_Idx (id, nombre_archivo, fecha_upload,filesize, mime, content)
12 VALUES (GC_CONCURSODOC_SEQ.NEXTVAL, p_file_name, p_upload_date, p_filesize, p_mime,BFILENAME
13 ('GC_CONCURSOSDOCS',p_file_name));
14 COMMIT;
15 END;
16 /

Procedure created.

SCOTT@orcl_11g> SHOW ERRORS
No errors.
SCOTT@orcl_11g> exec Loadfile_Concurso('24686O~1.DOC', to_date('26/07/2004','dd/MM/YYYY'), '501', 'doc');

PL/SQL procedure successfully completed.

SCOTT@orcl_11g> SELECT err_index_name, err_timestamp,err_text FROM ctx_user_index_errors ORDER BY err_timestamp;

no rows selected

SCOTT@orcl_11g> exec Loadfile_Concurso('test1.doc', to_date('26/07/2004','dd/MM/YYYY'), '501', 'doc');

PL/SQL procedure successfully completed.

SCOTT@orcl_11g> SELECT err_index_name, err_timestamp,err_text FROM ctx_user_index_errors ORDER BY err_timestamp;

no rows selected

SCOTT@orcl_11g> SELECT COUNT (*) FROM gc_concursodoc_idx
2 /

COUNT(*)
----------
2

SCOTT@orcl_11g> SELECT token_text FROM dr$gc_concursodoc_ctx$i
2 /

TOKEN_TEXT
----------------------------------------------------------------
CONTENTS
DOC
HALLOWEEN
ORIGINAL
TEST1
TESTING

6 rows selected.

SCOTT@orcl_11g> EXEC CTX_DOC.SET_KEY_TYPE ('ROWID')

PL/SQL procedure successfully completed.

SCOTT@orcl_11g> SELECT CTX_DOC.SNIPPET ('gc_concursodoc_ctx', ROWID, 'test%') FROM gc_concursodoc_idx
2 /

CTX_DOC.SNIPPET('GC_CONCURSODOC_CTX',ROWID,'TEST%')
--------------------------------------------------------------------------------
testing on halloween
This is the original contents of test1.doc.

SCOTT@orcl_11g>

how to use CTXSYS.CONTEXT index

SCOTT@orcl_11g> CREATE TABLE test_tab (test_col VARCHAR2(30))
2 /

Table created.

SCOTT@orcl_11g> INSERT ALL
2 INTO test_tab VALUES ('ab')
3 INTO test_tab VALUES ('a''b')
4 INTO test_tab VALUES ('a b')
5 SELECT * FROM DUAL
6 /

3 rows created.

SCOTT@orcl_11g> CREATE INDEX test_idx ON test_tab (test_col)
2 INDEXTYPE IS CTXSYS.CONTEXT
3 /

Index created.

SCOTT@orcl_11g> SELECT token_text FROM dr$test_idx$i
2 /

TOKEN_TEXT
----------------------------------------------------------------
AB
B

SCOTT@orcl_11g> SELECT * FROM test_tab WHERE CONTAINS (test_col, 'a''b') > 0
2 /

TEST_COL
------------------------------
a'b
a b

SCOTT@orcl_11g> SELECT * FROM test_tab WHERE CONTAINS (test_col, 'a b') > 0
2 /

TEST_COL
------------------------------
a'b
a b

SCOTT@orcl_11g> SELECT * FROM test_tab WHERE CONTAINS (test_col, 'ab') > 0
2 /

TEST_COL
------------------------------
ab


Another Example



SCOTT@orcl_11g> DROP INDEX test_idx
2 /

Index dropped.

SCOTT@orcl_11g> BEGIN
2 CTX_DDL.CREATE_PREFERENCE ('test_lex', 'BASIC_LEXER');
3 CTX_DDL.SET_ATTRIBUTE ('test_lex', 'PRINTJOINS', '''');
4 END;
5 /

PL/SQL procedure successfully completed.

SCOTT@orcl_11g> CREATE INDEX test_idx ON test_tab (test_col)
2 INDEXTYPE IS CTXSYS.CONTEXT
3 PARAMETERS ('LEXER test_lex')
4 /

Index created.

SCOTT@orcl_11g> SELECT token_text FROM dr$test_idx$i
2 /

TOKEN_TEXT
----------------------------------------------------------------
A'B
AB
B

SCOTT@orcl_11g> SELECT * FROM test_tab WHERE CONTAINS (test_col, 'a''b') > 0
2 /

TEST_COL
------------------------------
a'b

SCOTT@orcl_11g> SELECT * FROM test_tab WHERE CONTAINS (test_col, 'a b') > 0
2 /

TEST_COL
------------------------------
a b

SCOTT@orcl_11g> SELECT * FROM test_tab WHERE CONTAINS (test_col, 'ab') > 0
2 /

TEST_COL
------------------------------
ab

SCOTT@orcl_11g>

Text search in SQL query

Oracle TEXT is designed for such things. You can create a multi_column_datastore and use it as a parameter when creating a context index, then you can search quickly for various words in any of the columns in the datastore in various ways. The following is a simple example that searches for any rows that have both CPM and EK12 but not DXB plus the date condition. There are many other conditions that can be specified.

SCOTT@orcl_11g> CREATE TABLE test_tab
2 (id NUMBER,
3 rcvd_dt DATE,
4 msgtext VARCHAR2(15),
5 toptext VARCHAR2(15),
6 text_cols VARCHAR2(1))
7 /

Table created.

SCOTT@orcl_11g> INSERT ALL
2 INTO test_tab VALUES (1, SYSDATE - 1, 'CPM EK12', 'DXB', NULL)
3 INTO test_tab VALUES (2, SYSDATE - 1, 'CPM EK12', 'WHATEVER', NULL)
4 INTO test_tab VALUES (3, SYSDATE - 1, 'ABC DEF', 'WHATEVER', NULL)
5 INTO test_tab VALUES (4, SYSDATE, 'CPM EK12', 'WHATEVER', NULL)
6 SELECT * FROM DUAL
7 /

4 rows created.

SCOTT@orcl_11g> BEGIN
2 CTX_DDL.CREATE_PREFERENCE ('test_multi', 'MULTI_COLUMN_DATASTORE');
3 CTX_DDL.SET_ATTRIBUTE ('test_multi', 'COLUMNS', 'msgtext, toptext');
4 END;
5 /

PL/SQL procedure successfully completed.

SCOTT@orcl_11g> CREATE INDEX test_text_idx ON test_tab (text_cols)
2 INDEXTYPE IS CTXSYS.CONTEXT
3 PARAMETERS ('DATASTORE test_multi')
4 /

Index created.

SCOTT@orcl_11g> SELECT *
2 FROM test_tab
3 WHERE CONTAINS (text_cols, 'CPM AND EK12 NOT DXB') > 0
4 AND TRUNC (RCVD_DT) = TRUNC (SYSDATE-1)
5 /

ID RCVD_DT MSGTEXT TOPTEXT T
---------- --------- --------------- --------------- -
2 29-NOV-08 CPM EK12 WHATEVER

SCOTT@orcl_11g>

COMMIT and ROLLBACK in a trigger

A commit inside a trigger would defeat the basic definition of an atomic transaction (see ACID). Trigger logic is by definition an extension of the original DML operation. Changes made within triggers should thus be committed or rolled back as part of the transaction in which they execute. For this reason, triggers are NOT allowed to execute COMMIT or ROLLBACK statements (with the exception of autonomous triggers).

Here is an example of what will happen when they do:

SQL> CREATE TABLE tab1 (col1 NUMBER);
Table created.

SQL> CREATE TABLE log (timestamp DATE, operation VARCHAR2(2000));
Table created.

SQL> CREATE TRIGGER tab1_trig
2 AFTER insert ON tab1
3 BEGIN
4 INSERT INTO log VALUES (SYSDATE, 'Insert on TAB1');
5 COMMIT;
6 END;
7 /
Trigger created.

SQL> INSERT INTO tab1 VALUES (1);
INSERT INTO tab1 VALUES (1)
*
ERROR at line 1:
ORA-04092: cannot COMMIT in a trigger
ORA-06512: at "SCOTT.TAB1_TRIG", line 3
ORA-04088: error during execution of trigger 'SCOTT.TAB1_TRIG'

Autonomous transactions:
-----------------------
As a workaround, one can use autonomous transactions. Autonomous transactions execute separately from the current transaction.
Unlike regular triggers, autonomous triggers can contain COMMIT and ROLLBACK statements.

Example:
SQL> CREATE OR REPLACE TRIGGER tab1_trig
2 AFTER insert ON tab1
3 DECLARE
4 PRAGMA AUTONOMOUS_TRANSACTION;
5 BEGIN
6 INSERT INTO log VALUES (SYSDATE, 'Insert on TAB1');
7 COMMIT; -- only allowed in autonomous triggers
8 END;
9 /
Trigger created.

SQL> INSERT INTO tab1 VALUES (1);
1 row created.

Note that the above example will insert and commit log entries even if the main transaction is rolled back!

Remember that an "autonomous_transaction" procedure/function/trigger is a whole transaction in itself and so it must end with a commit or a rollback statement.
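
The same pattern works in a stand-alone procedure; here is a minimal sketch of an autonomous logger that re-uses the LOG table from the example above:

CREATE OR REPLACE PROCEDURE write_log (p_operation VARCHAR2) IS
  PRAGMA AUTONOMOUS_TRANSACTION;
BEGIN
  INSERT INTO log VALUES (SYSDATE, p_operation);
  COMMIT; -- mandatory: an autonomous block must end with COMMIT or ROLLBACK
END;
/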

limits for block, record, subquery and label nesting

The following limits apply:

Level of Block Nesting: 255
Level of Record Nesting: 32
Level of Subquery Nesting: 254
Level of Label Nesting: 98

How to know the size of a package or procedure

SQL> select * from dba_object_size where name = 'procedure_name';
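
Note that object names are stored in upper case, and the size is broken down over several columns (there is also a USER_OBJECT_SIZE view for your own objects). For example, with MY_PACKAGE as a placeholder name:

SELECT name, type, source_size, parsed_size, code_size, error_size
FROM dba_object_size
WHERE name = 'MY_PACKAGE';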

How to pass an object/table as an argument to a remote procedure

The only way to reference an object type between databases is via a database link. Note that it is not enough to just use "similar" type definitions.

Look at this example:

-- Database A: receives a PL/SQL table from database B
CREATE OR REPLACE PROCEDURE pcalled(TabX DBMS_SQL.VARCHAR2S) IS
BEGIN
-- do something with TabX from database B
null;
END;
/

-- Database B: sends a PL/SQL table to database A
CREATE OR REPLACE PROCEDURE pcalling IS
TabX DBMS_SQL.VARCHAR2S@DBLINK2;
BEGIN
pcalled@DBLINK2(TabX);
END;
/

SELECT from SQL*Plus but not from PL/SQL

PL/SQL respects object privileges granted directly to the user, but does not observe privileges granted through roles. The consequence is that a SQL statement can work in SQL*Plus but give an error in PL/SQL. Choose one of the following solutions:

Grant direct access on the tables to your user. Do not use roles!

GRANT select ON scott.emp TO my_user;

Define your procedures with invoker rights (Oracle 8i and higher):

create or replace procedure proc1
authid current_user is
begin
  null;  -- statements here run with the privileges of the invoking user
end;
/

Move all the tables to one user/schema

How to execute operating system commands from PL/SQL?

There is no direct way to execute operating system commands from PL/SQL. PL/SQL doesn't have a "host" command, as with SQL*Plus, that allows users to call OS commands. Nevertheless, the following workarounds can be used:

1. Database Pipes
-----------------
Write an external program (using one of the precompiler languages, OCI or Perl with Oracle access modules) to act as a listener on a database pipe (SYS.DBMS_PIPE). Your PL/SQL program then puts requests to run commands on the pipe; the listener picks them up and runs the requests. Results are passed back on a different database pipe.

CREATE OR REPLACE FUNCTION host_command( cmd IN VARCHAR2 )
RETURN INTEGER IS
status NUMBER;
errormsg VARCHAR2(80);
pipe_name VARCHAR2(30);
BEGIN
pipe_name := 'HOST_PIPE';
dbms_pipe.pack_message( cmd );
status := dbms_pipe.send_message(pipe_name);
RETURN status;
END;
/
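
For completeness, a rough sketch of the receiving side of the pipe. The loop below only shows the DBMS_PIPE plumbing; the actual OS call still has to be made by the external listener program, which would also send the result back on a second pipe:

DECLARE
  status NUMBER;
  cmd    VARCHAR2(4000);
BEGIN
  LOOP
    -- wait up to 60 seconds for a request on the pipe
    status := DBMS_PIPE.RECEIVE_MESSAGE('HOST_PIPE', 60);
    EXIT WHEN status != 0;   -- 1 = timeout, 3 = interrupt
    DBMS_PIPE.UNPACK_MESSAGE(cmd);
    -- the external program (Pro*C, OCI, Perl, ...) executes cmd here
    -- and returns the output on a result pipe, e.g. 'HOST_PIPE_RESULT'
  END LOOP;
END;
/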

2. External Procedure Listeners:
---------------------------------
From Oracle 8 one can call external 3GL code in a dynamically linked library (DLL or shared object). One just writes a library in C/C++ to do whatever is required. Defining this C/C++ function to PL/SQL makes it callable. Look at this External Procedure example.

3. Using Java
-------------

4. DBMS_SCHEDULER
-----------------

In Oracle 10g and above, one can execute OS commands via the DBMS_SCHEDULER package. Look at this example:

BEGIN
dbms_scheduler.create_job(job_name => 'myjob',
job_type => 'executable',
job_action => '/d01/oracle/ttt.sh',
enabled => TRUE,
auto_drop => TRUE);
END;
/

exec dbms_scheduler.run_job('myjob');

difference between %TYPE and %ROWTYPE

Both %TYPE and %ROWTYPE are used to declare PL/SQL variables with the same datatype as objects defined in the database. If the datatype or precision of a column changes, the program automatically picks up the new definition from the database without having to make any code changes.

The %TYPE and %ROWTYPE constructs provide data independence, reduce maintenance costs, and allow programs to adapt as the database changes to meet new business needs.

1.

%TYPE is used to declare a field with the same type as that of a specified table's column. Example:

DECLARE
v_EmpName emp.ename%TYPE;
BEGIN
SELECT ename INTO v_EmpName FROM emp WHERE ROWNUM = 1;
DBMS_OUTPUT.PUT_LINE('Name = ' || v_EmpName);
END;
/

2.

%ROWTYPE is used to declare a record with the same types as found in the specified database table, view or cursor. Examples:
DECLARE
v_emp emp%ROWTYPE;
BEGIN
v_emp.empno := 10;
v_emp.ename := 'XXXXXXX';
END;
/

How to write Dynamic sql in plsql

Starting from Oracle8i one can use the "EXECUTE IMMEDIATE" statement to execute dynamic SQL and PL/SQL statements (statements created at run-time).
Look at these examples.

[Note that the statements within quotes are NOT semicolon terminated:]

EXECUTE IMMEDIATE 'CREATE TABLE x (a NUMBER)';

-- Using bind variables...'
sql_stmt := 'INSERT INTO dept VALUES (:1, :2, :3)';
EXECUTE IMMEDIATE sql_stmt USING dept_id, dept_name, location;

-- Returning a cursor...
sql_stmt := 'SELECT * FROM emp WHERE empno = :id';
EXECUTE IMMEDIATE sql_stmt INTO emp_rec USING emp_id;
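
EXECUTE IMMEDIATE ... INTO only handles single-row queries; for multi-row dynamic queries a ref cursor can be opened with the same bind syntax, for example:

DECLARE
  TYPE emp_cur_t IS REF CURSOR;
  emp_cur emp_cur_t;
  emp_rec emp%ROWTYPE;
BEGIN
  OPEN emp_cur FOR 'SELECT * FROM emp WHERE deptno = :d' USING 10;
  LOOP
    FETCH emp_cur INTO emp_rec;
    EXIT WHEN emp_cur%NOTFOUND;
    DBMS_OUTPUT.PUT_LINE(emp_rec.ename);
  END LOOP;
  CLOSE emp_cur;
END;
/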

One can also use the older DBMS_SQL package (V2.1 and above) to execute dynamic
statements. Look at these examples:

CREATE OR REPLACE PROCEDURE DYNSQL AS
cur integer;
rc integer;
BEGIN
cur := DBMS_SQL.OPEN_CURSOR;
DBMS_SQL.PARSE(cur, 'CREATE TABLE X (Y DATE)', DBMS_SQL.NATIVE);
rc := DBMS_SQL.EXECUTE(cur);
DBMS_SQL.CLOSE_CURSOR(cur);
END;
/

More complex DBMS_SQL example using bind variables:

CREATE OR REPLACE PROCEDURE DEPARTMENTS(NO IN DEPT.DEPTNO%TYPE) AS
v_cursor integer;
v_dname char(20);
v_rows integer;
BEGIN
v_cursor := DBMS_SQL.OPEN_CURSOR;
DBMS_SQL.PARSE(v_cursor, 'select dname from dept where deptno > :x', DBMS_SQL.V7);
DBMS_SQL.BIND_VARIABLE(v_cursor, ':x', no);
DBMS_SQL.DEFINE_COLUMN_CHAR(v_cursor, 1, v_dname, 20);
v_rows := DBMS_SQL.EXECUTE(v_cursor);
loop
if DBMS_SQL.FETCH_ROWS(v_cursor) = 0 then
exit;
end if;
DBMS_SQL.COLUMN_VALUE_CHAR(v_cursor, 1, v_dname);
DBMS_OUTPUT.PUT_LINE('Department name: '||v_dname);
end loop;
DBMS_SQL.CLOSE_CURSOR(v_cursor);
EXCEPTION
when others then
DBMS_SQL.CLOSE_CURSOR(v_cursor);
raise_application_error(-20000, 'Unknown Exception Raised: '||sqlcode||' '||sqlerrm);
END;
/

how to use truncate/drop/create in plsql

One can call DDL statements like CREATE, DROP, TRUNCATE, etc. from PL/SQL by using the "EXECUTE IMMEDIATE" statement (native SQL).
Examples:

begin
  EXECUTE IMMEDIATE 'CREATE TABLE X(A DATE)';
end;
/

begin
  execute immediate 'TRUNCATE TABLE emp';
end;
/

DECLARE
  var VARCHAR2(100);
BEGIN
  var := 'CREATE TABLE temp1(col1 NUMBER(2))';
  EXECUTE IMMEDIATE var;
END;
/

How to read/write files from PL/SQL?

The UTL_FILE database package can be used to read and write operating system files.
A DBA user needs to grant you access to read from/ write to a specific directory before using this package.
Here is an example:

CONNECT / AS SYSDBA
CREATE OR REPLACE DIRECTORY mydir AS '/tmp';
GRANT read, write ON DIRECTORY mydir TO scott;

Provide user access to the UTL_FILE package (created by catproc.sql):

GRANT EXECUTE ON UTL_FILE TO scott;

Copy and paste these examples to get you started:

Write File
----------
DECLARE
fHandler UTL_FILE.FILE_TYPE;
BEGIN
fHandler := UTL_FILE.FOPEN('MYDIR', 'myfile', 'w');
UTL_FILE.PUTF(fHandler, 'Look ma, Im writing to a file!!!\n');
UTL_FILE.FCLOSE(fHandler);
EXCEPTION
WHEN utl_file.invalid_path THEN
raise_application_error(-20000, 'Invalid path. Create directory or set UTL_FILE_DIR.');
END;
/
Read File
---------
DECLARE
fHandler UTL_FILE.FILE_TYPE;
buf varchar2(4000);
BEGIN
fHandler := UTL_FILE.FOPEN('MYDIR', 'myfile', 'r');
UTL_FILE.GET_LINE(fHandler, buf);
dbms_output.put_line('DATA FROM FILE: '||buf);
UTL_FILE.FCLOSE(fHandler);
EXCEPTION
WHEN utl_file.invalid_path THEN
raise_application_error(-20000, 'Invalid path. Create directory or set UTL_FILE_DIR.');
END;
/
NOTE: UTL_FILE was introduced with Oracle 7.3. Before Oracle 7.3 the only means of writing a file was to use DBMS_OUTPUT with the SQL*Plus SPOOL command.

How to Find a string in plsql code

How to know where certain tables, columns and expressions are referenced in your
PL/SQL source code.


SELECT type, name, line
FROM user_source
WHERE UPPER(text) LIKE UPPER('%&KEYWORD%');

how to detect changes in plsql code

SELECT OBJECT_NAME,
TO_CHAR(CREATED, 'DD-Mon-RR HH24:MI') CREATE_TIME,
TO_CHAR(LAST_DDL_TIME, 'DD-Mon-RR HH24:MI') MOD_TIME,
STATUS
FROM USER_OBJECTS
WHERE LAST_DDL_TIME > '&given_date';

Difference Between PL/SQL and Java in procedure/trigger

PL/SQL:
Can be used to create Oracle packages, procedures and triggers
Data centric and tightly integrated into the database
Proprietary to Oracle and difficult to port to other database systems
Data manipulation is slightly faster in PL/SQL than in Java
PL/SQL is a traditional procedural programming language
Java:
Can be used to create Oracle packages, procedures and triggers
Open standard, not proprietary to Oracle
Incurs some data conversion overhead between the Database and Java type
Java is an Object Oriented language, and modules are structured into classes. Java can be used to produce complete applications.

dbms_xmlgen

dbms_xmlgen.newContextFromHierarchy(queryString IN VARCHAR2)
RETURN ctxHandle;

set serveroutput on

DECLARE
qryctx dbms_xmlgen.ctxhandle;
result XMLTYPE;

PROCEDURE lob_output (p_clob CLOB) IS l_clob CLOB;
l_clob_length NUMBER;
l_iterations NUMBER;
l_chunk VARCHAR2(32767);
l_chunk_length NUMBER := 32767;
BEGIN
l_clob := p_clob;
l_clob_length := dbms_lob.getlength(l_clob);
l_iterations := CEIL(l_clob_length / l_chunk_length);

FOR i IN 0 .. l_iterations - 1 LOOP
l_chunk := dbms_lob.substr(l_clob,l_chunk_length,i*l_chunk_length+1);
dbms_output.put_line(l_chunk);
END LOOP;
END;
BEGIN
qryctx := dbms_xmlgen.newcontextFromHierarchy('SELECT level,
XMLElement("Position", XMLElement("Name", ename), XMLElement("Title",
job)) FROM emp CONNECT BY PRIOR empno = mgr START WITH mgr is NULL');

result := dbms_xmlgen.getxmltype(qryctx);

dbms_xmlgen.closeContext(qryctx);

lob_output(RESULT.getClobVal());
END;
/

Sunday, September 20, 2009

SGA Allocation formula or guideline

There are many guides for SGA sizing; just follow and study them.
There are many ways to allocate the SGA.


1.

2.

Oracle EM fixing/reconfiguration

Oracle EM fixing/reconfiguration
for this follow the link:-

History of PL/SQL code changes

One can build a history of PL/SQL code changes by setting up an AFTER CREATE schema (or database) level trigger (available from Oracle 8.1.7). This way one can easily revert to previous code should someone make any catastrophic changes. Look at this example:

-- Create a table
CREATE TABLE SOURCE_HIST
AS SELECT SYSDATE CHANGE_DATE, USER_SOURCE.*
FROM USER_SOURCE WHERE 1=2;

-----create a trigger to store in that table

CREATE OR REPLACE TRIGGER change_hist
AFTER CREATE ON SCHEMA
DECLARE
BEGIN
IF ora_dict_obj_type IN
('PROCEDURE', 'FUNCTION', 'PACKAGE', 'PACKAGE BODY', 'TYPE')
THEN
INSERT INTO source_hist
SELECT SYSDATE, user_source.*
FROM user_source
WHERE TYPE = ora_dict_obj_type AND NAME = ora_dict_obj_name;
END IF;
EXCEPTION
WHEN OTHERS
THEN
raise_application_error (-20000, SQLERRM);
END;
/
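
To see what changed, simply query the history table (MY_PROC is just a placeholder for the object you are interested in):

SELECT change_date, name, type, line, text
FROM source_hist
WHERE name = 'MY_PROC'
ORDER BY change_date, type, line;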



Redo nologging

Introduction
----------------
This paper covers the subject of reducing redo generation (LOGGING) using NOLOGGING. It is a follow-up to the Redo Reduction with Maximum Recoverability paper. The first paper covers some useful subjects about redo generation which are not mentioned in this paper.

Important points about NOLOGGING
--------------------------------
Oracle gives the user the ability to limit LOGGING (redo generation) on tables and indexes by setting them in NOLOGGING mode, but before going into how to limit the redo generation it is important to note the following points about LOGGING:
• NOLOGGING is designed to handle bulk inserts of data which can be re-produced.
• LOGGING should not be disabled on a primary database if it has one or more standby databases. For this reason Oracle introduced the ALTER DATABASE FORCE LOGGING command in Oracle 9iR2. If this command is run then logging is not disabled on any database object, regardless of the object's logging mode.
• Any change to the data dictionary will cause redo generation.
• Data which is not logged cannot be recovered. The data should be backed up after the modification.
• NOLOGGING does not apply to UPDATE, DELETE or conventional INSERT.
• Tables and indexes should be set back to LOGGING mode when NOLOGGING is no longer needed.
• NOLOGGING is not needed for Direct Path Insert if the database is in NOARCHIVELOG mode.

Disabling Logging
----------------
Logging can be disabled at the table level or the tablespace level. If it is done at the tablespace level then every newly created index or table in this tablespace will be in NOLOGGING mode. A table or an index can be created in NOLOGGING mode, or it can be altered using ALTER TABLE/INDEX ... NOLOGGING. It is important to note that just because an index or a table was created with NOLOGGING does not mean that redo generation has been stopped for this table or index. NOLOGGING is active in the following situations, while running one of the following commands, but not after that (this is not a full list):
o direct load (SQL*Loader)
o direct load INSERT (using APPEND hint)
o CREATE TABLE ... AS SELECT
o CREATE INDEX
o ALTER TABLE MOVE
o ALTER TABLE ... MOVE PARTITION
o ALTER TABLE ... SPLIT PARTITION
o ALTER TABLE MERGE PARTITION
o ALTER TABLE MODIFY PARTITION ADD SUBPARTITON
o ALTER TABLE MODIFY PARTITION COALESCE SUBPARTITON
o ALTER INDEX ... SPLIT PARTITION
o ALTER INDEX ... REBUILD
o ALTER INDEX ... REBUILD PARTITION

Logging is stopped only while one of the commands above is running, so if a user runs:
ALTER INDEX foo_idx REBUILD NOLOGGING;
the actual rebuild of the index does not generate redo (all data dictionary changes associated with the rebuild do), but after that any DML on the index will generate redo; this includes direct load insert on the table to which the index belongs.
Here is another example to make the point clear:
Create table tab_nolog NOLOGGING (….);
All the following statements will generate redo despite the fact that the table is in NOLOGGING mode:
Insert into tab_nolog .., Update tab_nolog set …, delete from tab_nolog ..
The following WILL NOT generate redo (apart from dictionary changes and indexes):
Insert /*+ APPEND */ ..
Alter table tab_nolog move ..
Alter table tab_nolog move partition …

Using NOLOGGING
---------------
To activate NOLOGGING for one of the ALTER commands above, add the NOLOGGING clause after the end of the ALTER command. For example:
Alter table foo move partition part1 tablespace ts1 NOLOGGING;
The same applies for CREATE INDEX but for CREATE TABLE the NOLOGGING should come after the table name. Example:
Create table tab_with_nologging NOLOGGING as select * from big_tab;
It is a common mistake to add the NOLOGGING at the end, because Oracle will consider it an alias and the table creation will generate a lot of logging.
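
A quick way to see where the keyword ended up is to check USER_TABLES.LOGGING after the create; tab_ctas_wrong is a made-up name and big_tab is the table from the example above:

CREATE TABLE tab_ctas_wrong AS SELECT * FROM big_tab NOLOGGING; -- NOLOGGING is parsed as a query alias here
SELECT table_name, logging FROM user_tables
WHERE table_name IN ('TAB_WITH_NOLOGGING', 'TAB_CTAS_WRONG');   -- NO for the correct one, YES for the "wrong" one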

To use Direct Path Load in SQL*Loader you must run the $ORACLE_HOME/rdbms/admin/catldr.sql script before your first sqlldr is run in direct path mode. To run sqlldr in direct path mode use direct=true. For further information refer to Conventional and Direct Path Loads.
Direct Path Insert is discussed below.

DIRECT PATH INSERT
-----------------
To use Direct Path Insert use the /*+ APPEND */ hint as follow:
INSERT /*+ APPEND */ into … SELECT …
When direct path insert is used Oracle does the following:
• Formats the data to be inserted as Oracle blocks.
• Inserts the blocks above the High Water Mark (HWM).
• When the commit takes place the HWM is moved to the new position. The process bypasses the buffer cache.

It is clear that direct load is useful for bulk inserts. Using it to insert a few hundred records at a time can have a bad effect on space and performance.
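
One more behavioural point worth remembering: after a direct path insert the same transaction cannot read or modify the table until it commits; big_tab_copy below is just a hypothetical copy of big_tab:

INSERT /*+ APPEND */ INTO big_tab_copy SELECT * FROM big_tab;
SELECT COUNT(*) FROM big_tab_copy;   -- fails with ORA-12838 before the commit
COMMIT;
SELECT COUNT(*) FROM big_tab_copy;   -- works after the commit
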
It is very important to understand how Direct Path Insert affects redo generation. As mentioned above it does not affect indexes, but it is affected by the following factors:
• The database Archivelog mode.
• Using the /*+ APPEND */ hint.
• The LOGGING mode of the table.
• The FORCE LOGGING mode of the database (from 9i R2).

If the database is in FORCE LOGGING mode then Oracle will treat the table as if it were in LOGGING mode regardless of its own setting. To find out whether the database is in FORCE LOGGING mode or not, run:
select FORCE_LOGGING from v$database ;
If the /*+ APPEND */ hint is not used then the insert will generate the normal amount of redo regardless of the other factors.
The table below shows the relation between the ARCHIVELOG mode and having the table in LOGGING mode when the /*+ APPEND */ hint is used. It does not include index redo, nor does it include data dictionary changes redo.

LOGGING MODE   ARCHIVELOG   NOARCHIVELOG
------------   ----------   ------------
LOGGING        Redo         No Redo
NOLOGGING      No Redo      No Redo


Reducing Logging For Bulk DML
-----------------------------
-----------------------------
Bulk Inserts
------------
To load bulk data using Direct Path.
o set table in nologging mode. Alter table table_name nologging;
o alter index index_name unusable ;
o alter session set skip_unusable_indexes=true ;(*)
o Insert /*+ APPEND */ into table_name select …
o Alter index index_name rebuild nologging;
o Alter table table_name logging ;
o Alter index index_name logging ;
o Backup the data.

(*) skip_unusable_indexes is an instance initialization parameter in 10g and it defaults to TRUE. Before 10g, skip_unusable_indexes needs to be set in the session or the user will get an error. It is a good practice to set it in the session, regardless of the database version, when the above is done.
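
As a concrete sketch of the steps above (big_tab, big_tab_idx and staging_tab are placeholder names):

ALTER TABLE big_tab NOLOGGING;
ALTER INDEX big_tab_idx UNUSABLE;
ALTER SESSION SET skip_unusable_indexes = TRUE;

INSERT /*+ APPEND */ INTO big_tab
SELECT * FROM staging_tab;
COMMIT;

ALTER INDEX big_tab_idx REBUILD NOLOGGING;
ALTER TABLE big_tab LOGGING;
ALTER INDEX big_tab_idx LOGGING;
-- ... and back the new data up (tablespace backup or export)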


There is no direct way (at the time of writing this document) of reducing redo generation for bulk update and delete. The user needs to reduce the workload on the database.

Bulk Delete:
------------
1. Create a new_table with no logging, CREATE TABLE table_name NOLOGGING (….); THE NOLOGGING COMES AFTER THE TABLE_NAME NOT AT THE END OF THE STATEMENT.
2. Insert /*+ Append */ into new_table select the records you want to keep from current_table.
3. Create the indexes on the new table with NOLOGGING (*)
4. Create constraints, grants etc.
5. Drop current_table.
6. Rename new_table to current.
7. Alter new_table and indexes logging.
8. Backup the data.

(*) If the data left is small, or there are a lot of dependencies on the table (views, procedures, functions), the following steps can be used instead of steps 3-6 above:
3. Disable constrains on current_table;
4. Truncate current_table;
5. make indexes unusable;
6. alter current table NOLOGGING ;
7. Insert /*+ APPEND */ into current_table select * from new_table ;
8. commit;
9. rebuild indexes with NOLOGGING;
10. enable constraints
11. Put current table and indexes in LOGGING mode
12. backup the data
13. drop table new_table;

Bulk Update:
-----------
Follow the steps for Bulk Delete but integrate the update within the select statement. Let's say that you want to update the value column in the goods table by increasing it by 10%; the statements will be like:
1. Create a new_table with no logging, CREATE TABLE table_name NOLOGGING (….); THE NOLOGGING COMES AFTER THE TABLE_NAME NOT AT THE END OF THE STATEMENT.
2. Insert /*+ Append */ into new_table select (update statement eg: col1, col2* 1.1,…)
3. Create the indexes on the new table with NOLOGGING (*)
4. Create constraints, grants etc.
5. Drop current_table.
6. Rename new_table to current.
7. Alter new_table and indexes logging.
8. Backup the data.

Backup And Nologging
-------------------
It is important that the data loaded using NOLOGGING can be loaded again if you need to. If the database crashes before backing up the new data then this data cannot be recovered.
Here are the two scenarios of backing up:
Export (exp or expdp)
This method will allow you to recover the loaded data up to the point the export was taken but not later.

Hot Backup
----------
In order to recover any additional data or modifications to the table you bulk inserted into using NOLOGGING, the least you need to do is a hot backup of that tablespace. Remember you still generate redo for DML on the table when it is in NOLOGGING mode, but you are strongly advised to put it in LOGGING mode in case you run one of the operations mentioned in the Disabling Logging section.

Basic connections in Oracle Database

Basic connections and Appropriate Privileges in Oracle Database

Ordinarily, users like Scott, HR or any other user without the almighty SYSDBA system privilege cannot perform the startup or shutdown of any Oracle database, because the authentication for such users is processed against the data dictionary. For these users, it is impossible to start up or create any database, since the data dictionary cannot be read until the database is created and opened.

When you launch a connection to Enterprise Manager or Database Control (in 10g or higher), you don't have the option to use operating system authentication, because this connects via the listener, as if it were a remote process.
With that being said, your listener must be up and running for you to be able to establish a connection; otherwise, you won't be able to.

The following are various ways to establish connections to the database:

a.) Connect user/pass[@db]
This type of connection is established using the data dictionary authentication. Here Oracle will validate the username and password combination against values stored in the data dictionary. Obviously, the database must be up and running for the connection to go through. In other words, the database must be opened for the connection to be successful and even if you are Tom Kyte, you cannot issue the startup or shutdown commands. 

b.) Connect user/pass[@db] as sysdba
This type of connection instructs Oracle to go through the external password file to validate the username and password combination before the connection can go through.

c.) Connect user/pass[@db] as sysoper
This type of connection also instructs Oracle to go through the external password file to validate the username and password combination, but the session connects with the more limited SYSOPER privilege rather than SYSDBA.

d.) Connect / as sysdba
This type of connection uses operating system authentication: Oracle asks the host operating system whether the user running SQL*Plus is a member of the operating system group that owns the Oracle software. If the OS confirms this, the user is logged in as SYSDBA without supplying a username or password.

e.) Connect / as sysoper
This type of connection also uses operating system authentication, in exactly the same way, but the user is logged in as SYSOPER without supplying a username or password.

Any user connecting as in examples b, c, d or e above can issue the STARTUP and SHUTDOWN commands, and can connect to the database no matter what state it is in, even if the database has not yet been created.

On the other hand, examples a, b and c above can include a database connect string, which is necessary when the connection is established across the network.
Naturally, a connect string is not an option for operating system authentication, because OS authentication expects the user to be logged onto the machine hosting the Oracle server, either directly or indirectly (for example via SSH/PuTTY or telnet).
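A minimal SQL*Plus sketch of the difference, assuming a local instance and a TNS alias named orcl (both placeholders):

-- OS authentication: no username or password needed, works even if the database is down
sqlplus / as sysdba
SQL> startup

-- data dictionary authentication: the database must already be open
sqlplus scott/tiger@orcl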

Database duplication using Recovery Manager

Introduction
This document provides a brief description of how to refresh a database (duplicate a database) into another environment from a Production database backup taken to tape using RMAN.

I. Introduction
---------------
This document describes the process of refreshing the Test Databases from the RMAN Production database backups taken to the Tapes / Disks.

II. Initial Preparation Steps
-----------------------------
We first need to make sure that the database is not running within a Fail Safe environment, and that the disk space used by the old database is released so the new database will fit. Here are a few steps that need to be taken care of before starting the database refresh:

1. If there is a requirement to preserve some data or accounts (schemas and other necessary objects) from the old environment, export that data first, before starting the refresh from Production.

2. If the databases are running in Fail Safe environment, shut them down through Fail Safe Manager. Also shut down the Listener and the Intelligent Agent that is running for those particular databases.

3. Modify the TNSNAMES.ORA, the INIT.ORA (and listener.ora if required) files to make sure the database can be started independently, using the local listener. Try this out by starting the listener service and database service manually through the Services screen on the Win 2000 machine, and starting the database through SQL*Plus.

4. Shut down the database using the Fail Safe Manager and remove all database files except those in the Admin directories (e.g. init.ora). This is required to free space on disk to fit the new database. If we have enough disk space for the restore to happen, we move the existing files to a different directory or mount point instead.

III. Preparing RMAN Duplication Script
--------------------------------------
Once we are done with the above steps, we can proceed with creating the script for restoring the database. An example of this script is given below; the values in angle brackets are placeholders for your own connect strings, SCN and file names:
connect catalog rman/password@<catalog_db>;
connect target sys/password@<production_db>;
connect auxiliary sys/password@<auxiliary_db>;
run {
allocate auxiliary channel ch1 type 'sbt_tape' parms
'ENV=(TDPO_OPTFILE=c:\cluster\tdpo.opt)';
set until scn <scn>;

set newname for datafile 1 to '<new path for restore>';
set newname for datafile 2 to '<new path for restore>';
. . . . . .
duplicate target database to <auxiliary_db_name>
logfile
group 1 ('<redo_1a_path>',
'<redo_1b_path>') SIZE 100M,
group 2 ('<redo_2a_path>',
'<redo_2b_path>') SIZE 100M,
group 3 ('<redo_3a_path>',
'<redo_3b_path>') SIZE 100M;
}

The description of the above script is as follows:

The first part deals with connecting to the required databases:

1. the catalog database, where the RMAN catalog is stored,
2. the target database, which is the database we want to clone, and
3. the auxiliary database, which is the one we are attempting to create.
Note that when running this script later on, both the catalog and the target databases need to be open during the process, while the auxiliary database is normally in NOMOUNT state.

Next in the script is allocating the channel used to access the backups through TSM. Note that to do this we will need to change the TSM configuration (dsm.opt file, nodename parameter) so that the node appears as the production node.
Next in the script is the SET UNTIL SCN / DATE command, which specifies the point up to which the database will be duplicated. If UNTIL SCN / DATE is not specified, RMAN will attempt to recover up to the last archived log, which can cause a failure if that log is not available on the tape drive (e.g. it is still on the production server's disk).
Next is the list of SET NEWNAME FOR DATAFILE commands, which are required when the new disk structure differs from the production disk structure (which is the case on all our systems). All database files should be specified in this list (nothing is required for tempfiles). The list of datafiles can be obtained by querying the DBA_DATA_FILES data dictionary view.
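A minimal sketch of a query that can generate these SET NEWNAME lines from DBA_DATA_FILES on the production database; the target directory D:\oradata\testdb\ is a placeholder and the path separator assumes Windows:

SELECT 'set newname for datafile ' || file_id ||
       ' to ''D:\oradata\testdb\' ||
       SUBSTR(file_name, INSTR(file_name, '\', -1) + 1) || ''';' AS rman_cmd
FROM dba_data_files
ORDER BY file_id;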
Finally, the duplicate command is there to do the actual database duplication.

IV. Running Database Duplication
--------------------------------
To run database duplication we can prepare a batch script, or run a command to start it up. It would look something like this:

rman cmdfile=<the rman script> msglog=<a file name for the log>

Before starting the RMAN script, the following things need to be taken care of:
1. Verify that the RMAN catalog database is open. Make sure this database stays open during the complete duplication process; for example, if it normally goes down for backup, turn off the backup procedures. If the connection to the database is lost during the duplication, the process will fail and will need recovery.

2. Verify that the target database is open. Make sure this database stays open during the complete duplication process, the same as for the RMAN catalog database.

3. Verify that the Oracle services for the auxiliary database are running and that the database is in NOMOUNT state.

If the RMAN script is successful, it will get all the files from the file system, place them in the appropriate locations as specified in the script, and recover the database. It will also change the database ID and start the database. This is the best-case scenario; if the duplication script fails, you may need to recover from the failure as described below.

V. Recovering from Failure
--------------------------
If the RMAN duplication process fails, we might need to recover the database manually using the RMAN backup. The database duplication or the restore can fail for reasons such as:

1. RMAN catalog database going down for backup
2. Archived logs not available on the file system (when SET UNTIL SCN was not specified in the script).
In those cases you might try the following steps to recover. First, run the SWITCH CLONE command through RMAN (after connecting to the target, catalog and auxiliary databases):
run{
switch clone datafile all;
}
Afterwards, try recreating the control file. RMAN initially creates a control file that does not have all the data files specified in it (it creates the complete one later). The best way is to back up the control file to trace on the target database and modify that script to run on the auxiliary database. The typical changes to the script are: use the new file names, since the locations might have changed; set the new database name; and use the RESETLOGS clause.
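A minimal sketch of that step; the database name and the file paths in the edited script are placeholders:

-- on the target (production) database: write the CREATE CONTROLFILE script to a trace file
ALTER DATABASE BACKUP CONTROLFILE TO TRACE;

-- edited for the auxiliary database, the script ends up looking roughly like this:
CREATE CONTROLFILE SET DATABASE "TESTDB" RESETLOGS ARCHIVELOG
  LOGFILE
    GROUP 1 ('D:\oradata\testdb\redo01.log') SIZE 100M,
    GROUP 2 ('D:\oradata\testdb\redo02.log') SIZE 100M
  DATAFILE
    'D:\oradata\testdb\system01.dbf',
    'D:\oradata\testdb\users01.dbf';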
Once the control file script has been created and executed, complete the recovery of the database up to the specified SCN; the RMAN script can look something like this:
run {
allocate auxiliary channel ch1 type 'sbt_tape' parms 'ENV=(TDPO_OPTFILE=c:\cluster\tdpo.opt)';
set until scn 6899135273;
recover
clone database
check readonly;
release channel ch1;
}
This step will obtain all required archived logs from the file system and apply them to the database. After recovery is completed you can open the database:
alter database open resetlogs;
That would complete the recovery. Note that when recovered this way, the database ID is still the same as for production, so you cannot use RMAN to back up that database (unless you use a different catalog). Consequently, one should always strive to have the database duplicated properly through RMAN, without failures.

VI. Post Refresh Steps
----------------------
After the database is duplicated, there are a few steps that might be required:
1. In some environments it may be necessary to switch the database to NOARCHIVELOG mode, since Production normally runs in ARCHIVELOG mode.

2. Add files to the temporary tablespaces. When the database is restored, all files and tablespaces will exist; however, none of the tempfiles will be created. One needs to add tempfiles to the temporary tablespaces (see the sketch after this list).

3. Drop all database links and recreate them to point to the proper environment. After duplication, the new database will have the same database links as production, thus pointing to the production database. All the database links should therefore be dropped, and new ones created to point to the new environment.

4. If the new database is running in the Fail Safe environment, one will need to rebuild the password file on the other node (the one that was not used when duplicating the database). If this is not done, the database will not start on that node and the whole Fail Safe group will be moved to the other node.

5. Revert the changes to tnsnames.ora (and listener.ora if applicable) to make sure the database can start within Fail Safe.

6. Revert the changes made to the TSM configuration files (dsm.opt).

7. Shut down the database, stop the local listener and database services, and start the listener and database within Fail Safe.

8. Make sure the database can fail over correctly to the other node by moving the Fail Safe group manually.
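A minimal sketch of steps 2 and 3 above; the tempfile path, size, link name and connect details are placeholders:

-- step 2: add a tempfile to the temporary tablespace
ALTER TABLESPACE temp ADD TEMPFILE 'D:\oradata\testdb\temp01.dbf' SIZE 2G;

-- step 3: drop the production-pointing database link and recreate it against the new environment
DROP DATABASE LINK other_db_link;
CREATE DATABASE LINK other_db_link CONNECT TO app_user IDENTIFIED BY app_pass USING 'TESTDB';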

Analyze versus DBMS_STATS

DBMS_STATS is the recommended technique for generating statistics on database objects.
ANALYZE can be dangerous for SQL statement execution plans.
The basic concern here is that the elapsed time for DBMS_STATS should be less than for ANALYZE.

Table_name is EXT_PUB

SELECT COUNT(*) FROM EXT_PUB;   -- 210 columns
157207324
****************
ANALYZE TABLE

Analyze table EXT_PUB estimate statistics sample 20 percent;
Time Elapsed 2:53:01

DBMS_STATS

CASE 1:
20% sample size and parallel degree 3

Execute DBMS_STATS.GATHER_TABLE_STATS(OWNNAME =>'ORACLE',TABNAME => 'EXT_PUB',ESTIMATE_PERCENT =>20,degree => 3);
Time Elapsed 3:06:13
This takes more time than ANALYZE.

CASE 2:
20% sample size, METHOD_OPT limited to indexed columns, degree 3, and index statistics gathered in the same statement (CASCADE => TRUE)

Result: elapsed time reduced by approximately 76%

Execute DBMS_STATS.GATHER_TABLE_STATS(OWNNAME =>'ORACLE',TABNAME => 'EXT_PUB',ESTIMATE_PERCENT =>20,method_opt=> 'for all INDEXED COLUMNS size 1',cascade => true,degree=>3);
Time Elapsed 00:44:17

THE CONCEPT: by default, DBMS_STATS generates histograms for all columns through the METHOD_OPT option, so you can restrict that parameter to indexed columns only.

Export statistics from Production to Test environments
e.g. copy the stats of gca_ds_prs_ext_pub (Prod to Test)

exec dbms_stats.create_stat_table(ownname => 'ORACLE',STATTAB => 'NS_STATS_DEV');

1. EXEC DBMS_STATS.EXPORT_TABLE_STATS(OWNNAME => 'ORACLE', TABNAME => 'EXT_PUB', STATTAB => 'NS_STATS_DEV', STATID => 'EXID');

2. Export the NS_STATS_DEV table ($ exp)
3. Move the exp.dmp file to TEST
4. Import the NS_STATS_DEV table ($ imp)

5. SELECT LAST_ANALYZED FROM USER_TABLES WHERE TABLE_NAME='EXT_PUB';
07:31:43

6. EXEC DBMS_STATS.IMPORT_TABLE_STATS(OWNNAME => 'ORACLE', TABNAME => 'EXT_PUB', STATTAB => 'NS_STATS_DEV', STATID => 'EXID');

7.SELECT LAST_ANALYZED FROM USER_TABLES WHERE TABLE_NAME='EXT_PUB';
07:30:27


PERFORMANCE TUNING IN ORACLE

Introduction to this series
--------------------------
Tuning a database or database applications is a time-intensive, repetitive task. Over time, the database changes in many ways and must be reevaluated. In addition, changes made to improve performance will need to be evaluated. Did the change actually improve performance? If so, how much? How much effort did it take to achieve the gain (or loss)? What is "good performance" and how can we make it better?
This series identifies a standard approach for performing or evaluating database tuning efforts. It is applicable to data warehouses, custom databases, custom database applications or Oracle Applications Database. The criteria used in the evaluations will depend on individual circumstances. For example, a data warehouse has different performance requirements than a customer order application. These requirements will affect the database architecture, which affects how performance can be optimized. In short, the goal is to identify how performance can be optimized, as measured by how operations are transacted by users.
When users start complaining that the database is too slow, the natural desire is to tune everything. But the real answer, based on customer needs, is a combination of training, tuning and business process management. This primer identifies the basic elements of performance tuning, the tools used and a basic methodology for obtaining the information used to evaluate a database. It is designed for the non-technical user, i.e. the applications user, project managers or anyone who would like to understand what those techies and sysadmins do, and why they talk to themselves...
It identifies the basic steps for each stage of performance tuning, and explains which tools may be used and the function or expected results of each tool. It also provides a set of basic questions for initial performance problem assessment -- a manager's checklist to guide the non-technical manager through the business of performance tuning.

What is good performance?
There are a number of published articles on what constitutes good -- and bad -- performance. These articles are usually defined for a specific architecture. This is because the machine configuration -- size, speed and number of CPUs significantly impacts the system. Simply put, some systems have more horsepower than others. However, CPU is not the most important measure in dealing with performance issues. The most important measure is response time. Time is the metric the user understands, time is the measure the manager uses and time is the most effective measure of performance improvement. While there are many important ratios that measure whether a database is working effectively, the goal is for the database to effectively support user transactions. Tuning hit ratios and cache statistics is like patching potholes. Sometimes patching fixes the problem; sometimes repaving is better. That's why a more effective measure of performance tuning results is response time. Response time concentrates on the specific business process that users have identified as problematic.

Planning a tuning engagement is like the initial stages of any project and uses the same steps:
1. Determine what you've got.
How big is it in terms of users, machines, operating systems and connections? This is the current state.

2. Determine what they think they need.
What does the user see as the primary problem? No matter what you actually find (good, needing improvement, dearly beloved, etc.), you will frame your results and recommendations in terms of the user's perceptions. This is the future state, or the "to-be" state. It establishes an understanding of perceived priorities.

3. Determine the details of what you have.
In this case, you or your tame techie will use commands and scripts to retrieve detailed information about each area under consideration.

4. Establish goals.
If it ain't broke, don't fix it. What is it that returns the most value for the user? Remember that tuning for tuning's sake is not effective….

5. Prepare a plan.
Documenting your plan is an important management step. If you are lucky, when you distribute your plan you'll get feedback from all sides. (Recite this daily: "Feedback is my friend.") Feedback allows you to plan targeted communication as results are achieved.

6. Work the plan.
Your improvement plan will probably have multiple phases. At each phase completion, evaluate the accomplishments in comparison to both the original baseline and with accomplishments of previous phases. Update your communications, give feedback to the users regarding results.

7. Keep going until you reach your goal or reassess your goals.
As time elapses, priorities may change. For instance, a company with seasonal sales may need to have all performance enhancements completed before the sales rush or put in place some temporary "fixes" for the duration. Care must be taken when introducing temporary solutions, however.
In step 1 we identified the elements of a basic performance tuning plan. The next step is to determine the "current state" of the system. The current state is documented in order to form a baseline for comparison in later stages. We will also look at determining relative priorities with users and management, and how to define the details of the current state.
The current state
--------------------
Determining the current state involves both users and the technical architecture of the database. This includes working with knowledgeable users, often called power users, to find out what they see as problems. Ask your power users if they have observed any specific trends, jot down what time problems occur and what they were doing. Take the time to observe the user performing their problematic tasks and record the steps. Using the same steps every time establishes a baseline for comparison. When working with the user, take care to ask questions, but not to jump to conclusions or make promises about how much you will be able to change. You are in the evaluation phase and changes may be technical, training related or both.
Check the database size and basic parameters. This can be done using the enterprise manager or a set of basic SQL scripts. Do not make changes at this time. It is important to establish a baseline for comparison first.
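A minimal sketch of the kind of baseline script meant here, assuming access to the DBA and V$ data dictionary views; the parameter list is just an example:

-- overall database size (data files plus temp files), in GB
SELECT ROUND(SUM(bytes)/1024/1024/1024, 1) AS size_gb
FROM  (SELECT bytes FROM dba_data_files
       UNION ALL
       SELECT bytes FROM dba_temp_files);

-- a few basic instance parameters to record in the baseline document
SELECT name, value
FROM   v$parameter
WHERE  name IN ('db_block_size', 'sga_max_size', 'processes', 'optimizer_mode');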

Check the database architecture.
Identify network nodes used, the size and location of database files.
Identify mount points.
Identify network connectivity parameters, including packet sizes.
Prepare a network diagram for future reference.
Check how database transactions are accomplished.
How many users are there?
What software and hardware do they use?
What transactions have been identified as problematic?

Later, technical personnel will check the actual code used in transactions to determine effectiveness. Is the system shared (i.e., does a single server house production and development instances)? Are other types of applications housed on the system? In one corporate IT department, top-of-the-line mainframes housed database applications as well as manufacturing production tracking applications, creating contention for resources. Changes made must be evaluated in concert with ALL operations affected.

Can CPUs be added? What is the predicted effect and how does it compare with the cost to add? Scalable architectures may allow different sizes and numbers of CPUs. CPUs provided by different manufacturers may have different ratings as well. Whatever machine is run, a general rule of thumb is to know your CPU utilization target. Often this is a goal of 30 to 50 percent. What this means is that during normal, off-peak operation, measurements should indicate sufficient additional capacity for peak times, with an allowance for times of unusual load. Oracle Applications performance is not stable when CPU is over 80%.
Is the system maintained internally or externally? Is there a maintenance requirement that stipulates availability or capacity to be provided? If there are service level agreements (SLAs) in place, some improvements may be made at less (or more) cost. Additionally, the more organizations involved, the more time it may take to change performance.
Documentation
-------------
It is important to document the baseline, distribute the information and maintain accurate records over time. Where more than one organization is involved, the need to establish priorities agreed upon -- or negotiated -- with all interested parties is particularly important.
Typical documentation (a.k.a. "deliverables") for a performance tuning project will include:
1. System or technical architecture document
2. Network architecture
3. Database architecture
4. Capacity plan
5. Change management plan
6. Test plans and procedures
7. Maintenance plan
Plan for your deliverables early in the project. (Establish a plan, use the plan to focus, fill in the plan as you go.) Evaluate existing documentation to examine whether the data is still valid. Existing documentation may provide key information regarding previous problems, agreements that have been made or projections where problems are expected to occur.
While examining the system and objectively asking questions of the users, you must consider what gives the most bang for the buck.

Understanding priorities
Base performance goals on the customer's system, established baseline:
1. Meet with stakeholders to determine their perception of what is needed.
2. Meet with primary users identified as knowledgeable personnel by the stakeholder. Observe problems. Understand the business process. List ALL customer issues.
3. Get baselines.
4. Based on preliminary information, agree on improvement goals with the customer. Establish which application performance improvements can be made by procedural changes, network architectural changes, database changes or SQL tuning. In many instances, retraining users or reallocating concurrent managers can effect major improvements.
Make sure that the customer agrees with your strategy, and present the goals in a measurable context. Establish priorities for each area to be addressed. Document the baseline and the performance goals. Goals should be stated clearly and concisely, with measurable parameters.
For instance:
1. Sales order booking takes 45 seconds for one order line. We need it to run in five seconds for one order line.
2. Custom report: Update raw material costs takes 30 minutes to complete in off-peak hours. We need regular updates throughout the day without impacting sales order pricing.
3. Web pricing request takes 17 seconds to retrieve and display 4X4 cost/delivery matrix.

Be careful what you promise. Vendor published baselines represent aggressively tuned, optimal systems. Any promises you make to your users should be based on improving existing performance and any existing SLAs.
Evaluating the baseline will take into account information from all levels of users. A multilayer approach helps provide a 360-degree view of the system. Each type of user is asked the same questions and encouraged to give detailed examples. (When the user asserts, "Performance is rotten! I can't get my work done!" they are asked, "What are you doing when you notice it is so bad? Is it always bad, or is it sometimes bad? Does everyone have the same problem?")
Obtaining details
-------------------
The assessment process...
Is highly collaborative. It takes information from all types of users/interested parties.
Focuses on business. It identifies business processes, rather than imposing a technical solution.
Helps identify change impact. By identifying business processes affected, it assists in creation of important performance metrics.
Leads to measurable benefits. The metrics identified will be used in subsequent comparisons.
Supports future vision. The assessment and the performance tuning/analysis will often be used to support the justification for technical purchases such as more memory, additional hard drives or faster hard drives. It may also suggest changing existing configuration. On one system, a combination of disk shadowing and the selected RAID configuration created considerable degradation. What is a suitable RAID configuration for software development or manufacturing systems may not be suitable for databases.
May identify new models. As you discuss the system and obtain information, you may need to assure each set of users that you have no particular agenda and that your goal is to obtain as complete information as possible. Assure them that the results of the investigation will be available, and no solution technical or otherwise can be decided until all information is available. This is where having a planned set of deliverables is critical.
The goal for the assessment is to...
Define the problem(s) quickly. The users' responses can probably be combined into categories. These categories provide the general areas where you will concentrate your efforts.
Define the problem accurately. Your problems should be reported in terms of measurable behaviors. Often the performance tuning will be alleviated in stages. One set of "fixes" will improve performance, but additional improvements may be made with subsequent actions.
Identify the appropriate resources. Your problem definition will include the scope of the tuning effort's responsibility. For instance, while your information may reveal that hardware components are undersized, the purchase and installation may not be possible in the near term. Identify the times that resources are available; certain types of testing may need to be performed in off-hours, or some personnel may be unavailable during certain periods.
Solve the problem quickly. By structuring the problem statements, the analysis and the recommended solutions, you prepare the customer for your methods and the time it will take to analyze.
You may provide test/reporting scripts for the user to evaluate functional timing. Generally, you will want to limit any testing tools you may provide to "power" users. Let them know that these scripts may impact performance, and should not be run continuously. Power users can be your best friends. They can alert you to trends that you may not otherwise see.
In step 1 we identified the elements of a basic performance tuning plan. Then, in step 2, we documented the current state for subsequent stages and determined the relative priorities that need to be met in terms of user and management requirements. The next step in a tuning program is to assess the details of the system, in terms of specific architecture, processes and code.
The overall tuning process is a systematic review based on a finite set of business processes. There are a number of published articles on what constitutes good -- and bad -- performance. These articles are usually defined for a specific architecture. This is because the machine configuration -- size, speed and number of CPUs will significantly impact the system. Simply put, some systems have more horsepower than others. However, CPU is not the most important measure in dealing with performance issues. The most important measure is response time. It is the metric the user sees, and is the most effective measure of performance improvement. While there are many important ratios that measure whether a database is working effectively, the goal is for the database to effectively support user transactions. Tuning hit ratios and cache statistics is like patching potholes. Sometimes patching fixes the problem; sometimes repaving is better. That's why a more effective measure of performance tuning results is response time. Response time concentrates on the specific business process that users have identified as problematic. Select some basic, clearly understood problems and solutions. This is often referred to as "gathering the low-hanging fruit." At this point, you evaluate the information gathered during the evaluation of the current state.
Checking the business process first can have a great impact on your tuning efforts. For instance, do customer support specialists indicate that their searches for open orders for a specific customer are slow? By looking at what their actual process is, you may find that the operator is not making good use of the workbench search fields, and could benefit from adding parameters to his or her search criteria. In this case, some additional one-on-one training can increase their satisfaction and the performance of their searches.
Next, obtaining benchmarks -- running the same specific searches at no-load, off-peak and peak times -- gives a measure of how results vary throughout the day. If some operators have widely disparate results at the same time, you may need to look at the network configuration. For example, if Joe and Mary in operator bay A have no problems, but operations in bay B take twice as long, the network configuration should be analyzed. Note: in one instance, only one operator on a floor was slow. It turned out that someone had connected that one cubicle to a different server at the local switch as a stopgap measure and never set it back. The moral of the story is that you need to look beyond the database for performance solutions.
Perform all benchmarks on a single, tuned PC or laptop client to ensure continuity. If necessary, move the client to perform benchmarks in different areas. If this is not possible, verify client configuration complies with company standards. Comparing results of disparate systems is extremely difficult.
Gather benchmarks for all processes identified as problematic. When all benchmarks have been derived, discuss which processes have priority and what performance goals are appropriate. You may wish to present phased (preliminary, acceptable and target) goals, as well. This is because the final solution may combine a series of user training, network and database or application tuning actions.
Documentation is used to identify the initial definition of the problem, initial conditions, goals agreed upon and the focus for the work to be done. Over time, additional documentation addresses what actions are recommended and the results of the actions. Finally, an assessment outlining the completed actions, results and recommendations for ongoing actions to maintain/further improve the increased performance is recommended.
As mentioned earlier, significant improvement can be made by retraining users in specific areas of application use. Additional training performed should be documented for later use by new users.
Because the solution for a specific area is often iterative, as efforts are defined and redefined, the work performed must be detailed and recorded scrupulously (and hopefully controlled in a change-management system). At each stage, results should be compared to the baseline; performance tuning is a balancing act, where action in one area may have deleterious effects on another.
Also, because systems change over time, repeatable scripts will allow the user to verify the degree to which changes have or have not occurred... (e.g. "This time last year we had 5,000 orders placed per day, at an average rate of 10 orders per hour for each call-in sales rep.")

The technical components of performance tuning
Technical architecture: Server and network tuning
If there is a problem with the Oracle server, such as an overloaded CPU, excessive memory swapping or a disk I/O bottleneck, no amount of tuning within the Oracle database is going to improve your performance.
Application architecture: Integration of middle tier. Apache Server, Forms/Reports Server.
When evaluating the performance of forms and reports, check both requirements and standards under which they were developed. Forms and reports created using standard defaults may benefit from reducing or relaxing the standards for performance reasons. In one example, approximately 20 Web forms were completely redesigned when it was determined extensive use of check constraints impacted performance. For many of the forms, data input was prevalidated; when the constraints were removed, overall performance improved over 100%.
Database architecture:
(a) Instance tuning: Tuning the Oracle SGA is the next step, and all of the Oracle initialization parameters must be reviewed to ensure that the database has been properly configured. This phase of Oracle tuning is directed at looking for resource shortages in db_block_buffers, shared_pool_size and sort_area_size. Investigate important default parameters for Oracle, such as optimizer_mode.
(b) Object tuning: This phase of tuning looks at the setting for Oracle tables and indexes. Settings such as PCTFREE, PCTUSED and FREELISTS can have a dramatic impact on Oracle performance.
SQL tuning: This is the most time-consuming tuning operation because there can be many thousands of individual SQL statements that access the Oracle database. At a high level, we identify the most common SQL statements, tune each one by carefully reviewing the execution plan for the SQL and adjust the execution plan using Oracle hints.
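A minimal sketch of that SQL tuning workflow, assuming a 10g-style V$SQL and the DBMS_XPLAN package; the threshold, table, index and bind variable names are placeholders:

-- find the heaviest statements by logical reads
SELECT sql_id, executions, buffer_gets, sql_text
FROM   v$sql
WHERE  buffer_gets > 1000000
ORDER  BY buffer_gets DESC;

-- review the execution plan of a candidate statement, with or without a hint
EXPLAIN PLAN FOR
SELECT /*+ INDEX(o ix_orders_customer) */ *
FROM   orders o
WHERE  o.customer_id = :cust;

SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY);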
Networks are increasingly more important as we move to global businesses. Work with your telecom provider to evaluate link capacity and utilization. Request detailed reports. Consider resizing to reduce costs on underutilized links or upgrading those close to capacity. Evaluate network drivers. Is there a more recent, downloadable version? Vendors often update their drivers. This can make a significant improvement.
An up-to-date, accurate, detailed network diagram of both the logical and physical network locating all servers and users can be used to troubleshoot and predict problem areas. Check transaction processing routes to and from the server... if they are not the same, check the routing tables on each. What, if any, network management systems are used? Can any devices be placed in diagnostic mode during evaluations? Check that software and hardware are not operated routinely in diagnostic mode, as this will generally affect performance. Determine number of packet retries and collisions, bandwith utilizaton. Note any traffic management or priority queuing set on any device. Detail any contracts or service level agreements for the LAN and WAN. Identify any protocols installed on the clients.
Compare findings with benchmarks to establish size and extent of performance issues.
Implementing the plan
Having evaluated the current state, and performed numerous measurements, actual changes to the system must be controlled and documented. Because you will most likely continue to assess the performance measures, changes should be documented in detail. A good practice is to establish a repository of network and database assessment scripts. Similarly, record the steps of any changes. All changes should be evaluated on a test system before rolling out to production systems. Before implementing any changes, and each successive change, be sure to make a complete backup and test your ability to restore conditions to their original state. Make only one change at a time. Recovering after multiple changes is frustrating at best and may be impossible.
The sequence of changes will be based on priorities discussed with the users, management and technical personnel. You will also want to run baseline assessment scripts to determine whether the changes are effective. Occasionally, a change may not have the intended effect. This is what makes performance tuning an iterative task.