I am trying to join two scripts.
--script 1
select
t.vendor_code,
RTRIM(LTRIM(cast(datename(month, [CLOSED_DATE]) as char(15))))+',' + RTRIM(LTRIM(cast(year([CLOSED_DATE]) as char(20)))) as [CLOSED_DATE],
count(t.vendor_code) as [No_of_Case]
from
dbo.FTX_FA_CASE t WITH (NOLOCK)
where
[CLOSED_DATE] is not null
group by
t.vendor_code, CLOSED_DATE
and
--script 2
SELECT
(RTRIM(LTRIM(cast(datename(month, [dates]) as char(15))))+',' + RTRIM(LTRIM(cast(year([dates]) as char(20)))) + ',')
FROM
efoxsfc.dbo.FTX_FA_Calender
WHERE
1 = 1
AND CAST(dates AS DATETIME) >= DATEADD(mm, -5 ,DATEADD(m, DATEDIFF(m, 0,GETDATE()), 0))
AND dates <= DATEADD(m, DATEDIFF(m, 0,GETDATE()), 0)
which returns data like this:
I want a single script which returns this output:
Basically, I am trying to get all the rows from the second script and No_of_Case from the first. For any month that is not present in the first script's result, No_of_Case should be 0.
Please advise!
Try this
select
name,
(select distinct date
from Table_1 t2
where t1.date = t2.date),
count([no of count])
from
Table_1 t1
group by
name, date
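If the calendar query should drive the output, a LEFT JOIN is another way to get a 0 for the missing months. This is only a sketch, with column names taken from the two scripts in the question and all vendors folded together; keep vendor_code in the inner query, join, and grouping if you need per-vendor counts.
select
    c.[CLOSED_DATE],
    isnull(f.[No_of_Case], 0) as [No_of_Case]
from
(
    -- every month/year in the calendar window (script 2)
    select distinct
        rtrim(ltrim(cast(datename(month, [dates]) as char(15)))) + ',' +
        rtrim(ltrim(cast(year([dates]) as char(20)))) as [CLOSED_DATE]
    from efoxsfc.dbo.FTX_FA_Calender
    where cast(dates as datetime) >= dateadd(mm, -5, dateadd(m, datediff(m, 0, getdate()), 0))
      and dates <= dateadd(m, datediff(m, 0, getdate()), 0)
) c
left join
(
    -- case counts per month/year (script 1, aggregated to the month)
    select
        rtrim(ltrim(cast(datename(month, [CLOSED_DATE]) as char(15)))) + ',' +
        rtrim(ltrim(cast(year([CLOSED_DATE]) as char(20)))) as [CLOSED_DATE],
        count(*) as [No_of_Case]
    from dbo.FTX_FA_CASE t with (nolock)
    where [CLOSED_DATE] is not null
    group by
        rtrim(ltrim(cast(datename(month, [CLOSED_DATE]) as char(15)))) + ',' +
        rtrim(ltrim(cast(year([CLOSED_DATE]) as char(20))))
) f
    on f.[CLOSED_DATE] = c.[CLOSED_DATE];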
I would like to ask for a little help with a dynamic SQL date header.
I have data where I count transactions grouped by date and then by hour.
The date range is entered as a start date and an end date.
My data is simple, just date and time:
created_Date
'2020-01-14 13:25:20.147'
'2020-01-14 13:23:15.639'
'2020-01-14 12:27:48.896'
'2020-01-09 20:03:06.713'
'2020-01-09 19:33:05.032'
'2020-01-09 19:16:35.590'
'2020-01-09 19:08:19.788'
'2020-01-09 13:02:03.543'
'2020-01-09 12:23:12.595'
'2020-01-08 15:29:52.262'
'2020-01-08 15:17:31.247'
'2020-01-08 15:16:51.499'
'2020-01-08 13:29:47.661'
'2020-01-06 20:19:30.173'
Currently I have found this code:
ALTER PROCEDURE "DBA"."test_trancountdaily"(@sdate datetime, @edate datetime)
BEGIN
create table #trantable(TDate varchar(100),Hour varchar(2), count varchar(1000));
insert #trantable
SELECT CAST(created_date as date) AS ForDate,
DATEPART(hour,created_date) AS OnHour,
COUNT(*) AS Totals
FROM prescription
WHERE created_date >= @sdate and created_date <= @edate
GROUP BY CAST(created_date as date),
DATEPART(hour,created_date)
ORDER BY CAST(created_date as date),
DATEPART(hour,created_date) asc;
select * from #trantable;
END
My data is a created_date datetime column, and I want to count how many transactions fall within each hour,
but I would like an output like this:
Desired layout: one row per hour (HR), one column per date in the range (2020-01-01, 2020-01-02, 2020-01-03, and so on), and the transaction count in each cell, with 0 where there were no transactions in that hour.
thanks
bolivar1985
Sample result in Interactive SQL
Good day,
I just solved the query without using PIVOT in Sybase. It was mind-troubling, but I got it:
set @sql_date = @sql_date + ', COUNT(CASE WHEN DATE(prescription.created_date) = ''' + @ls_date +
    ''' AND #time_table.hrs = HOUR(prescription.created_date) THEN prescription.tran_id END) AS [' + @ls_date + ']' ;
I loop over the date range given by the user, using each date as a column header.
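In full, the approach looks roughly like this. This is only a sketch, not my exact procedure: the loop bookkeeping, the #time_table of hours (a column hrs holding 0 to 23), and the EXECUTE IMMEDIATE call are assumptions and may need adjusting for your Sybase version.
-- Sketch: build one COUNT(CASE ...) column per date in the range, then run the
-- assembled SELECT against the hours table cross-joined with prescription.
declare @d        date;
declare @ls_date  varchar(10);
declare @sql_date varchar(8000);
declare @sql      varchar(16000);

set @d = cast(@sdate as date);
set @sql_date = '';

while @d <= cast(@edate as date) loop
    set @ls_date  = cast(@d as varchar(10));   -- e.g. '2020-01-14', also used as the column header
    set @sql_date = @sql_date + ', COUNT(CASE WHEN DATE(prescription.created_date) = ''' + @ls_date +
        ''' AND #time_table.hrs = HOUR(prescription.created_date) THEN prescription.tran_id END) AS [' + @ls_date + ']';
    set @d = dateadd(dd, 1, @d);
end loop;

set @sql = 'SELECT #time_table.hrs AS HR' + @sql_date +
           ' FROM #time_table, prescription' +
           ' GROUP BY #time_table.hrs ORDER BY #time_table.hrs';
execute immediate @sql;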
bolivar1985
I have a table with columns policy_refer, client_name, and issue_date:
policy_refer Client_Name issue_date(entry_date)
0001 Ajaz 01-Jan-2019
0001 Ajaz 05-Jan-2019
0001 Anita 10-Jan-2019
I want to select the last updated/inserted client_name where policy_refer = 0001 in my select/join query:
select policy_master.CLIENT_NAME
,POLICY_INSURER_DETAIL.INSURER_NAME
,POLICY_INSURER_DETAIL.INSURER_BRANCH
,POLICY_INSURER_DETAIL.policy_number
,policy_master.policy_refer
,policy_master.POLICY_CLASS
,policy_master.POLICY_PRODUCT
,policy_master.P_ISSUE_DATE
,policy_master.EXPIRY_DATE
,sum(policy_master.TOTAL_SUMINSURED)
,sum(policy_master.GROSS)
,sum(policy_master.PERMIUM)
from POLICY_MASTER,POLICY_INSURER_DETAIL
where policy_master.policy_refer = POLICY_INSURER_DETAIL.POLICY_REFER
and POLICY_MASTER.POL_ID = POLICY_INSURER_DETAIL.POL_ID
and POLICY_MASTER.EXPIRY_DATE ='19-AUG-20'
and POLICY_MASTER.DOC_STATUS ='Posted'
group by POLICY_MASTER.policy_refer
,POLICY_INSURER_DETAIL.INSURER_NAME
,POLICY_INSURER_DETAIL.INSURER_BRANCH
,POLICY_INSURER_DETAIL.policy_number
,policy_master.policy_refer
,policy_master.EXPIRY_DATE
,policy_master.CLIENT_NAME
,policy_master.POLICY_CLASS
,policy_master.POLICY_PRODUCT
,policy_master.P_ISSUE_DATE;
One option is to find which row comes first after sorting by issue date in descending order (using analytic functions), and then fetch that row. For example:
select column_list_goes_here
from (
--> this is your current query, with the ROW_NUMBER addition
select all your columns here,
--
row_number() over (partition by policy_refer order by issue_date desc) rn
--
from your_tables
where ...
group by ...
--> end of your current query
)
where rn = 1
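On the sample rows from the question, the ROW_NUMBER call numbers the rows like this (illustration only):
-- rn = row_number() over (partition by policy_refer order by issue_date desc)
--
-- policy_refer  client_name  issue_date     rn
-- 0001          Anita        10-Jan-2019    1   <- latest row, the one kept by "where rn = 1"
-- 0001          Ajaz         05-Jan-2019    2
-- 0001          Ajaz         01-Jan-2019    3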
It appears the table you are talking about is the POLICY_MASTER table and you want the latest row (by ISSUE_DATE) for each policy.
Then you want to filter something like this:
select pm.CLIENT_NAME
,POLICY_INSURER_DETAIL.INSURER_NAME
,POLICY_INSURER_DETAIL.INSURER_BRANCH
,POLICY_INSURER_DETAIL.policy_number
,pm.policy_refer
,pm.POLICY_CLASS
,pm.POLICY_PRODUCT
,pm.P_ISSUE_DATE
,pm.EXPIRY_DATE
,sum(pm.TOTAL_SUMINSURED)
,sum(pm.GROSS)
,sum(pm.PERMIUM)
from (
SELECT *
FROM (
SELECT POL_ID,
CLIENT_NAME,
policy_refer,
POLICY_CLASS,
POLICY_PRODUCT,
P_ISSUE_DATE,
EXPIRY_DATE,
TOTAL_SUMINSURED,
GROSS,
PERMIUM,
ROW_NUMBER() OVER ( PARTITION BY policy_refer /*, pol_id */
ORDER BY P_ISSUE_DATE DESC ) AS rn
FROM POLICY_MASTER
WHERE EXPIRY_DATE = DATE '2020-08-19'
AND DOC_STATUS ='Posted'
)
WHERE rn = 1
) pm
INNER JOIN POLICY_INSURER_DETAIL pid
ON ( pm.policy_refer = pid.POLICY_REFER
AND pm.POL_ID = pid.POL_ID )
GROUP BY pm.policy_refer
,pid.INSURER_NAME
,pid.INSURER_BRANCH
,pid.policy_number
,pm.policy_refer
,pm.EXPIRY_DATE
,pm.CLIENT_NAME
,pm.POLICY_CLASS
,pm.POLICY_PRODUCT
,pm.P_ISSUE_DATE;
I guess you need the first row after sorting them in descending order of issue date:
select policy_master.CLIENT_NAME
,POLICY_INSURER_DETAIL.INSURER_NAME
,POLICY_INSURER_DETAIL.INSURER_BRANCH
,POLICY_INSURER_DETAIL.policy_number
,policy_master.policy_refer
,policy_master.POLICY_CLASS
,policy_master.POLICY_PRODUCT
,policy_master.P_ISSUE_DATE
,policy_master.EXPIRY_DATE
,sum(policy_master.TOTAL_SUMINSURED)
,sum(policy_master.GROSS)
,sum(policy_master.PERMIUM)
from POLICY_MASTER,POLICY_INSURER_DETAIL
where policy_master.policy_refer = POLICY_INSURER_DETAIL.POLICY_REFER
and POLICY_MASTER.POL_ID = POLICY_INSURER_DETAIL.POL_ID
and POLICY_MASTER.EXPIRY_DATE ='19-AUG-20'
and POLICY_MASTER.DOC_STATUS ='Posted'
and rownum = 1
group by POLICY_MASTER.policy_refer
,POLICY_INSURER_DETAIL.INSURER_NAME
,POLICY_INSURER_DETAIL.INSURER_BRANCH
,POLICY_INSURER_DETAIL.policy_number
,policy_master.policy_refer
,policy_master.EXPIRY_DATE
,policy_master.CLIENT_NAME
,policy_master.POLICY_CLASS
,policy_master.POLICY_PRODUCT
,policy_master.P_ISSUE_DATE
ORDER BY P_ISSUE_DATE DESC;
I have 3 tables: t1, t2, and t3.
t1 has 2 columns: id1, val1
t2 has: id2, val2
t3 has: id3, val3
If id1 = id2 and id2 = id3,
then I need to update val1 with val3.
But id1 repeats, and each repeated row should get the same val3.
I am using
update t1
inner join t2 on t1.id1 = t2.id2
inner join t3 on t2.id2 = t3.id3
set t1.val1 = t3.val3
;
But I am not able to get this to work.
The correct syntax is:
UPDATE table_name SET column = { expression | DEFAULT } [,...]
[ FROM fromlist ]
[ WHERE condition ]
So your UPDATE statement should look as follows:
update t1 set val1 = val3
from t2 inner join t3 on t2.id2 = t3.id3
where t1.id1 = t2.id2
;
See the Redshift documentation and their comprehensive UPDATE examples.
I needed to get values from the other table, t2.val3, on Redshift. I used the following
update t1
set val1 = t2.val3
from t2 join t1 t on t.id = t2.id;
I had to alias t1 (as t); otherwise Redshift complains.
Per @jie's solution, but expanded a bit and without the distraction of a table alias that is also a table name (heh heh):
update
my_counts
set
my_count_delta = t1.my_count - t2.my_count
from
my_counts t1 join
my_counts t2 on
t1.group_id = t2.group_id and
t2.count_dt = ( t1.count_dt - interval '1 day' )
where
t1.count_error is null
A few notes:
my_count is a running total
my_count_delta is the change from the previous day's entry (+/-)
this solves the case where a running total already exists but a column holding the daily delta needs to be added (see the illustration after these notes)
I love how Postgres-style SQL makes date addition/subtraction so simple: ( t1.count_dt - interval '1 day' )
as an avid lover of LEFT JOIN, I was confounded that this would not run with a left join; the error message was very clear, requiring a "balanced join" (or was it "even join"?), so I went back to @jie's version of the JOIN and found that the query was incredibly fast.
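As an illustration of the delta calculation (made-up rows, not real data):
-- group_id  count_dt    my_count   my_count_delta after the update
-- 7         2020-01-01  10         (unchanged: no previous-day row to join to)
-- 7         2020-01-02  13          3   = 13 - 10
-- 7         2020-01-03  12         -1   = 12 - 13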
(Background: I'm attempting to find the "peak" hour of activity in a series of Cameraapis records, defined as the one-hour period (starting at the top of the hour) containing the most entries whose start and end times fall within it. For example, 1:00 to 2:00 may have 8 entries within that timeframe, but 2:00 to 3:00 has 12 entries, so I would want it to return the 12-entry timeframe.)
I'm having trouble getting associated data from a SELECT query of a group. Here is the code:
def reach_peak_hour_by_date_range(start_date, end_date)
placement_self_device_id = self.device_id
query = <<-SQL
SELECT max(y.num_entries) AS max_entries
FROM
(
SELECT x.starting_hour, count(*) AS num_entries
FROM
(
SELECT date_trunc('hour', visitor_start_time) starting_hour
FROM Cameraapis WHERE device_id = '#{placement_self_device_id}'::text AND visitor_start_time > '#{start_date}'::timestamp AND visitor_end_time < '#{end_date}'::timestamp
) AS x
GROUP BY x.starting_hour
) AS y
SQL
results = Placement.connection.execute(query)
binding.pry
end
Cameraapis records have a device_id, visitor_start_time, and visitor_end_time, as referenced in the code.
This code successfully returns the max_entries in a 1-hour period, but I can't figure out what to SELECT to get the starting_hour associated with that max_entries. Because the outer query is an aggregation, it requires aggregate functions, which I don't actually need. Any advice?
I didn't quite understand the question... use window functions:
select starting_hour , num_entries from (
SELECT starting_hour ,y.num_entries, max(y.num_entries) over() AS max_entries
FROM
(
SELECT x.starting_hour, count(*) AS num_entries
FROM
(
SELECT date_trunc('hour', visitor_start_time) starting_hour
FROM Cameraapis WHERE device_id = '#{placement_self_device_id}'::text AND visitor_start_time > '#{start_date}'::timestamp AND visitor_end_time < '#{end_date}'::timestamp
) AS x
GROUP BY x.starting_hour
) AS y
) as u
where num_entries = max_entries
This next query returns all entries associated with the peak hour; you can modify it to return only the entry count with its associated hour by selecting the hour and the count using DISTINCT or grouping:
select * from
(
select x.*, max(num_entries) over()as max_num_entries from
(
SELECT Cameraapis.* ,date_trunc('hour', visitor_start_time) as starting_hour, count(*) over( partition by date_trunc('hour', visitor_start_time)) as num_entries
FROM Cameraapis WHERE device_id = '#{placement_self_device_id}'::text AND visitor_start_time > '#{start_date}'::timestamp AND visitor_end_time < '#{end_date}'::timestamp
) as x
) as x where max_num_entries = num_entries
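And if only the peak hour itself and its count are needed, a simpler variant (a sketch, reusing the interpolated placeholders from the question; ties for the peak would need extra handling) is to order by the count and take one row:
SELECT date_trunc('hour', visitor_start_time) AS starting_hour,
       count(*) AS num_entries
FROM Cameraapis
WHERE device_id = '#{placement_self_device_id}'::text
  AND visitor_start_time > '#{start_date}'::timestamp
  AND visitor_end_time < '#{end_date}'::timestamp
GROUP BY 1
ORDER BY num_entries DESC
LIMIT 1;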
I have two tables table1 and table2. Each table contains a column with itemPrice. I need to add the two columns together.
The SQL query below returns the correct SUM.
SELECT SUM(item1 + item2) FROM
(select SUM(t1.itemPrice) item1 from table1 t1 WHERE t1.userid = 'jonh') tableA
CROSS JOIN
(select SUM(t2.itemPrice) item2 from table2 t2 WHERE t2.userid = 'jonh') tableB
I am not being lazy, but the above query has so many SUM functions that I don't know where to start writing the LINQ query.
Can anyone help?
Ceci,
Hopefully this will give you what you want...
from f in (
from x in ( from t1 in Table1
where t1.Userid.Equals("John")
select new { Userid = t1.Userid }
).Distinct()
select new { item1 = ( from z in Table1
where z.Userid.Equals("John")
select z.ItemPrice ).Sum() ??0 ,
item2 = ( from z in Table2
where z.Userid.Equals("John")
select z.ItemPrice ).Sum() ??0 }
) select new { total = f.item1 + f.item2 }
In the case where there are no records for "John" in one table, it will bring back a 0 and sum up the other table.
hope this helps.