How to execute the statements in a procedure in parallel - stored-procedures

I have to execute the statements in this procedure in parallel so that the procedure's execution time decreases. Can anyone please help here?
My procedure is below. I want all of the SELECT statements in the procedure to start executing at exactly the same time. The INSERT statement must not start at the same time; it is fine if it executes sequentially afterwards. Please let me know if further details are required.
CREATE OR REPLACE PROCEDURE "test_proc"(
    p_fromdate DATE)
AS
    fromdate          DATE;
    todate            DATE;
    emp_id            NUMBER := 0;
    emp_address       NUMBER := 0;
    emp_dob           NUMBER := 0;
    emp_doj           NUMBER := 0;
    emp_msisdn        NUMBER := 0;
    emp_name          NUMBER := 0;
    emp_vehicl_number NUMBER := 0;
    emp_vehicl_type   NUMBER := 0;
    emp_middle_name   NUMBER := 0;
    emp_last_name     NUMBER := 0;
BEGIN
    SELECT id             INTO emp_id            FROM employee;
    SELECT address        INTO emp_address       FROM employee;
    SELECT dob            INTO emp_dob           FROM employee;
    SELECT doj            INTO emp_doj           FROM employee;
    SELECT msisdn         INTO emp_msisdn        FROM employee;
    SELECT name           INTO emp_name          FROM employee;
    SELECT vehicle_number INTO emp_vehicl_number FROM employee;
    SELECT vehicle_type   INTO emp_vehicl_type   FROM employee;
    SELECT middlename     INTO emp_middle_name   FROM employee;
    SELECT lastNAme       INTO emp_last_name     FROM employee;

    INSERT INTO test
    (
        idofEmp,
        Empaddress,
        Empdob,
        Empdoj,
        Empmsisdn,
        Empname,
        Empvehicle_number,
        Empvehicle_type,
        Empmiddlename,
        EmplastNAme
    )
    VALUES
    (
        emp_id,
        emp_address,
        emp_dob,
        emp_doj,
        emp_msisdn,
        emp_name,
        emp_vehicl_number,
        emp_vehicl_type,
        emp_middle_name,
        emp_last_name
    );
END;
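For reference, the ten single-column lookups above can be collapsed into one SELECT ... INTO, which reads the employee row once instead of ten times and usually removes the need to run anything in parallel. A minimal sketch, using the column and variable names from the procedure above (like the original, it assumes employee returns exactly one row):
SELECT id, address, dob, doj, msisdn, name,
       vehicle_number, vehicle_type, middlename, lastNAme
INTO   emp_id, emp_address, emp_dob, emp_doj, emp_msisdn, emp_name,
       emp_vehicl_number, emp_vehicl_type, emp_middle_name, emp_last_name
FROM   employee;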

Related

Using Variables in Query Criteria in a Snowflake Scripting Stored Procedure

In the following query I am using a stored procedure to create a table output_tbl_name from a source_tbl. The name of the output_tbl_name is timestamped with today's date.
DECLARE
creation_date STRING := to_varchar(current_date(), 'YYYYMMDD');
output_tbl_name STRING := concat('my_database.my_schema.', 'output_', :creation_date);
QUERY STRING;
BEGIN
QUERY:= REPLACE(
'create or replace table <output_tbl_name>(col1 varchar, col2 varchar) as
select * from source_tbl;'
,'<output_tbl_name>', :output_tbl_name);
EXECUTE IMMEDIATE :QUERY;
RETURN :QUERY;
END;
However, I would like to set a dynamic criteria for the data that is copied over. E.g., the source_tbl has a date column, and I want to only copy over records for dates > 6 months ago:
declare start_date date := add_months(current_date(), -6)
create or replace output_tbl_name as select * from source_tbl where date > <start_date>
How can I incorporate this into my above query? Placing it directly into my QUERY in my BEGIN statement isn't working. Thanks for your help!
create a source data table:
CREATE OR REPLACE TABLE tmp_table(col1 string);
DECLARE
creation_date STRING := to_varchar(current_date(), 'YYYYMMDD');
output_tbl_name STRING := concat('my_database.my_schema.', 'output_', :creation_date);
QUERY STRING;
source_tbl STRING := 'TMP_TABLE';
BEGIN
LET date_present := false;
SELECT true INTO :date_present FROM INFORMATION_SCHEMA.columns WHERE table_name = :source_tbl AND column_name = 'DATE';
if (date_present = true) then
QUERY := 'create or replace table <output_tbl_name>(col1 varchar, col2 varchar) as select * from source_tbl where date > <start_date>;';
ELSE
QUERY:= 'create or replace table <output_tbl_name>(col1 varchar, col2 varchar) as select * from source_tbl;';
END IF;
QUERY:= REPLACE( QUERY, '<output_tbl_name>', :output_tbl_name);
--EXECUTE IMMEDIATE :QUERY;
RETURN :QUERY;
END;
begin
let count := true;
if (count = true) then
return 'negative value';
elseif (count = 0) then
return 'zero';
else
return 'positive value';
end if;
end;
anonymous block
create or replace table my_database.my_schema.output_20220307(col1 varchar, col2 varchar) as select * from source_tbl;
put in a date column:
CREATE OR REPLACE TABLE tmp_table(col1 string, date timestamp);
anonymous block
create or replace table my_database.my_schema.output_20220307(col1 varchar, col2 varchar) as select * from source_tbl where date > <start_date>;
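A possible extension (sketch only, not part of the original answer): the <start_date> placeholder can be substituted the same way as <output_tbl_name>, with add_months supplying the six-month cutoff.
DECLARE
    start_date DATE := add_months(current_date(), -6);
    QUERY STRING := 'create or replace table <output_tbl_name>(col1 varchar, col2 varchar) as
                     select * from source_tbl where date > ''<start_date>'';';
BEGIN
    -- substitute the date first; <output_tbl_name> and EXECUTE IMMEDIATE are handled as in the answer above
    QUERY := REPLACE(QUERY, '<start_date>', to_varchar(:start_date, 'YYYY-MM-DD'));
    RETURN :QUERY;
END;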

Distinct Count in fastreport

Does anyone know how to make a distinct count in fastreport?
Example
I have the report:
Name sex
João m
João m
Maria f
A normal count would give 3, but I want a count that only considers the rows where the Name field does not repeat.
In this case, the result would be 2.
Can anyone help me? That's just an example; I cannot do a GROUP BY in SQL because I have several fields.
I'm not skilled in using FastReport, but I found this page on FastReport's official forum.
I think you can adapt that example to your scenario (note that the syntax may require some adjustments).
Bands:
GroupHeader1 <Sex>
MasterData1 [Name, Sex, ...]
GroupFooter1 [GetDistinctCount]
Script (only works when the dataset is sorted by the field being counted):
var
LastValue : string;
DistinctCount : integer;
//create this event by double-clicking the event from the Object Inspector
procedure OnGroupHeader1.OnBeforePrint;
begin
if LastValue <> (<Datasetname."Sex">) then
Inc(DistinctCount);
LastValue := <Datasetname."Sex">
end;
function GetDistinctCount: string;
begin
Result := IntToStr(DistinctCount);
end;
The base idea is that the DistinctCount variable is incremented each time the field value changes.
Script (should also work with an unsorted dataset):
var
FoundValues : array of string;
(* !!IMPORTANT!!
You need to initialize FoundValues array before to start counting: *)
SetLength(FoundValues, 0);
function IndexOf(AArray : array of string; const AValue : string) : integer;
begin
Result := 0;
while(Result < Length(AArray)) do
begin
if(AArray[Result] = AValue) then
Exit;
Inc(Result);
end;
Result := -1;
end;
//create this event by double-clicking the event from the Object Inspector
procedure OnGroupHeader1.OnBeforePrint;
begin
if(IndexOf(FoundValues, <Datasetname."Sex">) = -1) then
begin
SetLength(FoundValues, Length(FoundValues) + 1);
FoundValues[Length(FoundValues) - 1] := <Datasetname."Sex">;
end;
end;
function GetDistinctCount: string;
begin
Result := IntToStr(Length(FoundValues));
end;
The base idea is that each different value found is added to the FoundValues array.
You can also do it on the SQL side without GROUP BY (the example below uses SQL Server's table-variable syntax):
DECLARE #T TABLE (ID INT IDENTITY (1,1), Name NVARCHAR(25) , Sex CHAR(1));
INSERT INTO #T VALUES
('Sami','M'),
('Sami','M'),
('Maria','F');
SELECT DISTINCT Name, Sex FROM @T
You can also create a view and then use it in your report.
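For example, a view along these lines (the People table name is assumed here; only Name and Sex come from the question) would already hand de-duplicated rows to the report:
CREATE VIEW v_distinct_people AS
SELECT DISTINCT Name, Sex
FROM People;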
If you really need to do that in FastReport, you have to use a GroupHeader and a GroupFooter for it.
How?
You have to write your script in the OnBeforePrint event:
procedure OnGroupHeader1.OnBeforePrint;
Create this handler by double-clicking the event in the Object Inspector.

How to ignore some parameters in TQuery

If I have an SQL statement like the one below
SELECT * FROM myTable WHERE CID = :vCID AND DataType = :vDataType
and usually I use TQuery to get data like this:
aQuery.ParamByName('vCID').Value := '0025';
aQuery.ParamByName('vDataType').AsInteger := 1;
But how can I ignore the CID key to get SQL like
SELECT * FROM myTable WHERE DataType = :vDataType
I've tried the syntax below, but it failed:
aQuery.ParamByName('vCID').Value := '%';
aQuery.ParamByName('vDataType').AsInteger := 1;
Please help me out, thank you.
Change your Query to
SELECT * FROM myTable
WHERE CID = ISNULL(:vCID,CID) AND DataType = ISNULL(:vDataType,DataType)
or
SELECT * FROM myTable
WHERE COALESCE(CID,'') = COALESCE(:vCID,CID,'')
AND COALESCE(DataType,0) = COALESCE(:vDataType,DataType,0)
The second one would handle the case of NULL values in the table too.
The Parameter you don't want to use can be set to Unassigned
aQuery.ParamByName('vCID').Value := Unassigned; // <<
aQuery.ParamByName('vDataType').AsInteger := 1;
Since :vCID is NULL, it will be evaluated as CID = CID.
The best option is to simply use separate queries:
aQueryBoth.SQL.Text := 'SELECT * FROM myTable WHERE CID = :vCID AND DataType = :vDataType';
...
aQueryBoth.ParamByName('vCID').Value := '0025';
aQueryBoth.ParamByName('vDataType').AsInteger := 1;
aQueryDataType.SQL.Text := 'SELECT * FROM myTable WHERE DataType = :vDataType';
...
aQueryDataType.ParamByName('vDataType').AsInteger := 1;
The usual but somewhat verbose way is to introduce yet another parameter:
SELECT * FROM myTable
WHERE ( ( CID = :vCID ) OR ( :IgnoreCID <> 0 ))
AND ( DataType = :vDataType )
Then turning your queries into
aQuery.ParamByName('vCID').Value := '0025';
aQuery.ParamByName('IgnoreCID').AsInteger := 0;
aQuery.ParamByName('vDataType').AsInteger := 1;
or
aQuery.ParamByName('vCID').Value := Unassigned;
aQuery.ParamByName('IgnoreCID').AsInteger := 1;
aQuery.ParamByName('vDataType').AsInteger := 1;
If the server has a decent SQL optimizer, it will figure out whether the first parameter is worth checking.
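A related pattern (a sketch, not taken from the answers above) is to let a NULL parameter mean "ignore this filter" directly in the WHERE clause; whether the repeated named parameter has to be assigned once or per occurrence depends on the data-access components used:
SELECT * FROM myTable
WHERE (:vCID IS NULL OR CID = :vCID)
  AND (:vDataType IS NULL OR DataType = :vDataType)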

Reuse TSQLQuery Missing Params

I'm using Delphi XE2 and a TSQLQuery object. It works the first time that I use it. If I immediately reuse it, then it doesn't parse the new SQL for its parameters and rebuild the qry.Params list:
var
qry: TSQLQuery;
begin
qry := TSQLQuery.Create(nil);
try
qry.MaxBlobSize := -1;
qry.SQLConnection := AnExistingConnection;
qry.CommandText := 'select field1 from table1 where fieldX = #valueX';
qry.ParamByName('valueX').Value := 1;
qry.Open;
// ... use data ...
qry.Close;
qry.Params.Clear; // <- works the same with or without this
qry.CommandText := 'select field2 from table2 where fieldY = #valueY';
qry.ParamByName('valueY').Value := 2; // <- Error: 'valueY' Param not found
qry.Open;
finally
FreeAndNil(qry);
end;
end;
It doesn't matter what I do; it doesn't parse the second SQL statement for its parameters, so I can't bind the 'valueY' parameter by name.
I can think of two workarounds:
Manually build the qry.Params list myself.
Destroy and recreate the qry object in between the two commands.
I shouldn't have to do either of these. Perhaps there is a property or something on the qry object that will cause it to reparse parameters each time a new SQL statement is assigned to its CommandText property?
Turned out to be a syntax issue. Params must be prefaced with a : not a #. I had local SQL variables throughout the real first query, so there was a mixture of #param and :param variables throughout the SQL. By using the :param syntax for all bound parameters, the TSQLQuery does properly parse the parameters each time, like it is supposed to do.
var
qry: TSQLQuery;
begin
qry := TSQLQuery.Create(nil);
try
qry.MaxBlobSize := -1;
qry.SQLConnection := AnExistingConnection;
qry.CommandText := 'select field1 from table1 where fieldX = :valueX';
qry.ParamByName('valueX').Value := 1;
qry.Open;
// ... use data ...
qry.Close;
qry.CommandText := 'select field2 from table2 where fieldY = :valueY';
qry.ParamByName('valueY').Value := 2;
qry.Open;
finally
FreeAndNil(qry);
end;
end;
Use the TSQLQuery.SQL property instead of the TSQLQuery.CommandText property:
qry.SQL.Text := 'select field1 from table1 where fieldX = :valueX';
...
qry.SQL.Text := 'select field2 from table2 where fieldY = :valueY';
No need to call Params.Clear in between, the SQL property will handle that for you.

Stored procedure is extremely slow and doesn't do its job

I have this stored procedure:
DROP TABLE IF EXISTS SplitValuesDump;
CREATE TABLE SplitValuesDump (
    value VARCHAR(1000) NOT NULL PRIMARY KEY
);

DELIMITER $$
DROP PROCEDURE IF EXISTS `ChangeSitesRedirects`$$
CREATE PROCEDURE `ChangeSitesRedirects`(
    prodimainAddress varchar(255),
    subdomainMainAddress varchar(255)
)
SQL SECURITY INVOKER
BEGIN
    DECLARE tdomain varchar(1000);
    DECLARE tvalue varchar(1000);
    DECLARE prepValue varchar(1000);
    DECLARE subdomainFullAddress varchar(1000);
    DECLARE totalDomain int;
    DECLARE tclientid int;
    DECLARE sitedone INT DEFAULT 0;
    DECLARE splitdone INT DEFAULT 0;
    DECLARE lastDomain varchar(1000);
    DECLARE curlSites CURSOR FOR (SELECT domain, clientid FROM sites WHERE redirectsubdomain = 'N');
    DECLARE CONTINUE HANDLER FOR NOT FOUND SET sitedone = 1;

    SET sitedone := 0;
    OPEN curlSites;
    Scan_Sites: WHILE (sitedone = 0) DO
        IF sitedone = 1 THEN
            BEGIN
                LEAVE Scan_Sites;
            END;
        ELSE
            BEGIN
                DECLARE curlStringDump CURSOR FOR (SELECT `value` FROM SplitValuesDump);
                DECLARE CONTINUE HANDLER FOR NOT FOUND SET splitdone = 1;
                FETCH curlSites INTO tdomain, tclientid;
                CALL split_string(tdomain, ';');
                OPEN curlStringDump;
                SET splitdone := 0;
                ScanDump: WHILE (splitdone = 0) DO
                    IF splitdone = 1 THEN
                        BEGIN
                            LEAVE ScanDump;
                        END;
                    ELSE
                        BEGIN
                            FETCH curlStringDump INTO tvalue;
                            SET subdomainFullAddress := subdomainMainAddress;
                            IF tvalue <> "" THEN
                                BEGIN
                                    IF tvalue LIKE prodimainAddress OR tvalue LIKE subdomainMainAddress THEN
                                        BEGIN
                                            SET totalDomain := totalDomain + 1;
                                            IF tvalue LIKE subdomainMainAddress THEN
                                                BEGIN
                                                    SET subdomainFullAddress := tvalue;
                                                END;
                                            END IF;
                                        END;
                                    ELSE
                                        BEGIN
                                            SET totalDomain := totalDomain + 1;
                                            SET lastDomain := tvalue;
                                        END;
                                    END IF;
                                END;
                            END IF;
                        END;
                    END IF;
                END WHILE ScanDump;
                CLOSE curlStringDump;
                SET splitdone := 0;
                SET prepValue := 'N';
                IF lastDomain = '' AND totalDomain = 2 THEN
                    BEGIN
                        SET prepValue := subdomainFullAddress || CHAR(2) || prodimainAddress;
                        INSERT INTO sites_tmp SELECT * FROM sites WHERE clientid = tclientid LIMIT 1;
                        UPDATE sites_tmp SET redirectsubdomain = prepValue WHERE clientid = tclientid LIMIT 1;
                    END;
                ELSE
                    BEGIN
                        SET prepValue := prodimainAddress || CHAR(2) || lastDomain || CHAR(1) || subdomainFullAddress || CHAR(2) || lastDomain;
                        INSERT INTO sites_tmp SELECT * FROM sites WHERE clientid = tclientid LIMIT 1;
                        UPDATE sites_tmp SET redirectsubdomain = prepValue WHERE clientid = tclientid LIMIT 1;
                    END;
                END IF;
            END;
        END IF;
    END WHILE Scan_Sites;
    CLOSE curlSites;
    SET sitedone := 0;
END$$
For each record in the sites table I try to read a few pieces of information from the domain column, split its value, and derive some data from it,
and then update the sites_tmp table.
The problem is that the procedure is extremely slow, I don't know how to debug it or make it faster,
and in the end it doesn't process all the records.
What would you recommend here? Why is it so slow?
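Two things in the procedure above are worth checking while debugging. First, the inner CONTINUE HANDLER FOR NOT FOUND is the handler closest to FETCH curlSites, so when the outer cursor runs out of rows it is likely splitdone that gets set rather than sitedone, and Scan_Sites may never exit. Second, totalDomain is declared without a DEFAULT, so totalDomain + 1 stays NULL and the final IF can never see 2. A minimal sketch of the outer loop with both points addressed (names taken from the procedure above, the per-site body elided):
DELIMITER $$
CREATE PROCEDURE `ChangeSitesRedirects_sketch`(
    prodimainAddress varchar(255),
    subdomainMainAddress varchar(255)
)
SQL SECURITY INVOKER
BEGIN
    DECLARE tdomain varchar(1000);
    DECLARE tclientid int;
    DECLARE totalDomain int DEFAULT 0;          -- initialised so totalDomain + 1 is never NULL
    DECLARE lastDomain varchar(1000) DEFAULT '';
    DECLARE sitedone INT DEFAULT 0;
    DECLARE curlSites CURSOR FOR
        SELECT domain, clientid FROM sites WHERE redirectsubdomain = 'N';
    DECLARE CONTINUE HANDLER FOR NOT FOUND SET sitedone = 1;

    OPEN curlSites;
    Scan_Sites: LOOP
        FETCH curlSites INTO tdomain, tclientid;
        IF sitedone = 1 THEN
            LEAVE Scan_Sites;                   -- leave as soon as the outer cursor is exhausted
        END IF;
        SET totalDomain = 0, lastDomain = '';   -- reset the per-site counters each iteration
        CALL split_string(tdomain, ';');
        -- ... scan SplitValuesDump and update sites_tmp as in the original body ...
    END LOOP Scan_Sites;
    CLOSE curlSites;
END$$
DELIMITER ;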
