oracle sqlplus won't run script - sqlplus

SQL> desc product
Name                                      Null?    Type
----------------------------------------- -------- ----------------------------
P_CODE                                    NOT NULL VARCHAR2(10)
P_DESCRIPT                                NOT NULL VARCHAR2(35)
P_INDATE                                  NOT NULL DATE
P_QOH                                     NOT NULL NUMBER(38)
P_MIN                                     NOT NULL NUMBER(38)
P_PRICE                                   NOT NULL NUMBER(8,2)
P_DISCOUNT                                NOT NULL NUMBER(5,2)
V_CODE                                             NUMBER(38)
SQL> @ld_product.sql;
--sqlplus does nothing for this script--insert product
set linesize 500;
set sqlbl on;
INSERT INTO PRODUCT VALUES ('11QER/31','Power painter, 15 pal, 3-nozzle','03-Nov-13','8','5','109.99','0.00','25595');
INSERT INTO PRODUCT VALUES ('13-Q2/P2','7.25-in.pwr.saw blade','13-Dec-13','32','15','14.99','0.05','21344');
INSERT INTO PRODUCT VALUES ('14-Q1/L3','9.00-in.pwr.saw blade','13-Nov-13','18','12','17.49','0.00','21344');
INSERT INTO PRODUCT VALUES ('1548-QQ2','Hrd.cloth,1/4-in.,2x50','15-Jan-14','15','8','39.95','0.00','23119');
INSERT INTO PRODUCT VALUES ('1558-QW1','Hrd.cloth,1/2-in.,3x50','15-Jan-14','23','5','43.99','0.00','23119');
/*
set define off;
INSERT INTO PRODUCT VALUES ('2232/QTY','B&D jigsaw,12-in.blade','30-Dec-13','8','5','109.92','0.05','24288');
set define off;
INSERT INTO PRODUCT VALUES ('2232/QWE','B&D jigsaw,8-in.blade','24-Dec-13','8','5','99.87','0.05','24288');
set define off;
INSERT INTO PRODUCT VALUES ('2238/QPD','B&D cordless drill, 1/2-in.','20-Jan-14','12','5','38.95','0.05','25595');
INSERT INTO PRODUCT VALUES ('23109-HB','Claw hammer','20-Jan-14','23','10','9.95','0.10','21225');
INSERT INTO PRODUCT VALUES ('23114-AA','Sledge hammer, 12 lb.','02-Jan-14',' 8','5','14.40','0.05',' ');
INSERT INTO PRODUCT VALUES ('547?8-2T','Rat-tail file,1/8-in.fine','15-Dec-13','43','20','4.99','0.00','21344');
INSERT INTO PRODUCT VALUES ('89-WRE-Q','Hicut chain saw, 16 in.','07-Feb-14','11','5','256.99','0.05','24288');
INSERT INTO PRODUCT VALUES ('PVC23DRT','PVC pipe, 3.5-in., 8-ft','20-Feb-14','188','75','5.87','0.00',' ');
INSERT INTO PRODUCT VALUES ('SM-18277','1.25-in.metal screw,25','01-Mar-14','172','75','6.99','0.00','21225');
INSERT INTO PRODUCT VALUES ('SW-23116','2.5-in.wd.screw,50','24-Feb-14','237','100','8.45','0.00','21231');
INSERT INTO PRODUCT VALUES ('WR3/TT3','Steel matting, 4'x8'x1/6",.5" mesh','17-Jan-14','18','5','119.95','0.10','25595');
*/

You need to issue a COMMIT at the end in order to save your inserts to the database.
You could also:
SET AUTOCOMMIT ON
which tells SQL*Plus to issue a COMMIT automatically after each statement. That could be risky, though, as you can't roll back a mistake afterwards. Read up on it first.
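For example, a minimal sketch of how ld_product.sql could end, with an explicit COMMIT as the last statement (the INSERTs are abbreviated from the script above):
set linesize 500;
INSERT INTO PRODUCT VALUES ('11QER/31','Power painter, 15 pal, 3-nozzle','03-Nov-13','8','5','109.99','0.00','25595');
-- ... remaining INSERTs ...
COMMIT;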

Related

Getting the rowsAffected output in log analytics

I'm trying to get the rows-affected output from a SQL Server stored procedure activity in Azure Data Factory into Azure Log Analytics. I can currently get the rowsCopied and rowsRead values from the copy activity. Would appreciate any help.
Below is sample code for determining the affected rows from a stored procedure in SQL Server:
CREATE PROCEDURE UpdateTables
AS
BEGIN
    -- SET NOCOUNT ON added to prevent extra result sets from
    -- interfering with SELECT statements.
    SET NOCOUNT ON;
    DECLARE @RowCount1 INTEGER
    DECLARE @RowCount2 INTEGER
    DECLARE @RowCount3 INTEGER
    DECLARE @RowCount4 INTEGER
    UPDATE Table1 SET Column = 0 WHERE Column IS NULL
    SELECT @RowCount1 = @@ROWCOUNT
    UPDATE Table2 SET Column = 0 WHERE Column IS NULL
    SELECT @RowCount2 = @@ROWCOUNT
    UPDATE Table3 SET Column = 0 WHERE Column IS NULL
    SELECT @RowCount3 = @@ROWCOUNT
    UPDATE Table4 SET Column = 0 WHERE Column IS NULL
    SELECT @RowCount4 = @@ROWCOUNT
    SELECT @RowCount1 AS Table1, @RowCount2 AS Table2, @RowCount3 AS Table3, @RowCount4 AS Table4
END
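If you just need to see the counts, here is a hedged usage sketch: capture the procedure's result set with INSERT ... EXEC (the @Counts table variable is illustrative, not part of the original answer):
DECLARE @Counts TABLE (Table1 INT, Table2 INT, Table3 INT, Table4 INT);
INSERT INTO @Counts EXEC UpdateTables;
SELECT * FROM @Counts;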
Go through this SO post for complete details.

providing created_at and updated_at in mass insert for rails with postgres

I'm doing a mass insert à la @chris-heald. If you're interacting with the database directly using raw SQL, do you need to provide the created_at and updated_at fields yourself, or will you get them for free from Postgres? I'm unclear whether it's Rails or Postgres that does the automagical generation here.
ActiveRecord sets the updated_at and created_at values by itself; it doesn't set up the database to supply those values.
If you're doing a bulk insert through SQL then you have some options:
Assign them manually by using Time.now.utc.iso8601 to build the necessary strings. If you need greater precision in your timestamps then you can use strftime instead of iso8601 to build the strings.
Let the database do it by setting default values for those two columns. You could say:
alter table your_table alter column created_at set default now();
alter table your_table alter column updated_at set default now();
to add the defaults, then do your bulk import, and then get rid of the defaults using:
alter table your_table alter column created_at drop default;
alter table your_table alter column updated_at drop default;
You'd use ActiveRecord::Base.connection.execute to send those ALTER TABLEs into your database if you're doing all this from within Rails.
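Going back to the first option, here is a minimal raw-SQL sketch (your_table and its name column are hypothetical) where the bulk insert supplies the ISO 8601 timestamp strings inline:
INSERT INTO your_table (name, created_at, updated_at)
VALUES
  ('first',  '2014-01-15T10:00:00Z', '2014-01-15T10:00:00Z'),
  ('second', '2014-01-15T10:00:00Z', '2014-01-15T10:00:00Z');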

Use computed columns in related table in power query on SQLITE via odbc

In Power Query (the version included with Excel 2016, PC), is it possible to refer to a computed column of a related table?
Say I have an SQLite database as follows:
PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;
CREATE TABLE products (
    iddb INTEGER NOT NULL,
    px FLOAT,
    PRIMARY KEY (iddb)
);
INSERT INTO "products" VALUES(0,0.0);
INSERT INTO "products" VALUES(1,1.1);
INSERT INTO "products" VALUES(2,2.2);
INSERT INTO "products" VALUES(3,3.3);
INSERT INTO "products" VALUES(4,4.4);
CREATE TABLE sales (
    iddb INTEGER NOT NULL,
    quantity INTEGER,
    product_iddb INTEGER,
    PRIMARY KEY (iddb),
    FOREIGN KEY (product_iddb) REFERENCES products (iddb)
);
INSERT INTO "sales" VALUES(0,0,0);
INSERT INTO "sales" VALUES(1,1,1);
INSERT INTO "sales" VALUES(2,2,2);
INSERT INTO "sales" VALUES(3,3,3);
INSERT INTO "sales" VALUES(4,4,4);
INSERT INTO "sales" VALUES(5,5,0);
INSERT INTO "sales" VALUES(6,6,1);
INSERT INTO "sales" VALUES(7,7,2);
INSERT INTO "sales" VALUES(8,8,3);
INSERT INTO "sales" VALUES(9,9,4);
COMMIT;
Basically we have products (iddb, px) and sales of those products (iddb, quantity, product_iddb).
I load this data in Power Query by:
A. creating an ODBC data source using the SQLite3 driver: testDSN
B. in Excel: Data / New Query, feeding it this connection string: Provider=MSDASQL.1;Persist Security Info=False;DSN=testDSN;
Now in Power Query I add a computed column, say px10 = px * 10, to the products table.
In the sales table, I can expand the products table into product.px, but not product.px10. Shouldn't it be doable? (In this simplified example I could expand product.px first and then create the px10 column in the sales table, but then any new table needing px10 from products would require me to repeat the work...)
Any inputs appreciated.
I would add a Merge step from the sales query to connect it to the product query (which will include your calculated column). Then I would expand the Table returned to get your px10 column.
This is instead of expanding the Value column representing the product SQL table, which gets generated using the SQL foreign key.
You will have to come back and add any further columns added to the product query to the expansion list, but at least the column definition is only in one place.
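If changing the SQLite database itself is an option, another possible approach (a sketch, not from the answers above): define a view that carries the computed column once, and load that view instead of the raw products table. The view name products_ext is my own invention here:
CREATE VIEW products_ext AS
SELECT iddb, px, px * 10 AS px10
FROM products;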
In functional programming you don't modify existing values, only create new values. When you add the new column to product it creates a new table, and doesn't modify the product table that shows up in related tables. Adding a new column over product can't show up in Odbc's tables unless you apply that transformation to all related tables.
What you could do is generalize the "add a computed column" into a function that takes a table or record and adds the extra field. Then just apply that over each table in your database.
Here's an example against Northwind in SQL Server
let
    Source = Sql.Database(".", "Northwind_Copy"),
    AddAColumn = (data) => if data is table then Table.AddColumn(data, "UnitPrice10x", each [UnitPrice] * 10)
                           else if data is record then Record.AddField(data, "UnitPrice10x", data[UnitPrice] * 10)
                           else data,
    TransformedSource = Table.TransformColumns(Source, {"Data", (data) => if data is table then Table.TransformColumns(data, {"Products", AddAColumn}, null, MissingField.Ignore) else data}),
    OrderDetails = TransformedSource{[Schema="dbo",Item="Order Details"]}[Data],
    #"Expanded Products" = Table.ExpandRecordColumn(OrderDetails, "Products", {"UnitPrice", "UnitPrice10x"}, {"Products.UnitPrice", "Products.UnitPrice10x"})
in
    #"Expanded Products"

Informix trigger to change inserted values

I would like to change a couple of column values before they get inserted.
I am using Informix as the database.
I have a table consisting of 3 columns: Name (NVARCHAR), Type (INT), Plan (NVARCHAR).
Every time a new record is inserted, I would like to check the Name value before inserting it. If the Name starts with an F, I would like to set the Type value to 1 and the Plan value to "Test".
In short, what I want the trigger to do is:
For every new insertion, first check if Name value starts with F.
If yes, set the Type and Plan to 1 and "Test" then insert.
If no, insert the values as-is.
I have looked up the CREATE TRIGGER statement with BEFORE and AFTER. However, I would like a clearer example. My case would probably involve BEFORE, though.
The answer from @user3243781 gets close, but it did not work because it returns the error:
-747 Table or column matches object referenced in triggering statement.
This error is returned when a triggered SQL statement acts on the
triggering table, or when both statements are updates, and the column
that is updated in the triggered action is the same as the column that
the triggering statement updates.
So the alternative is to handle the NEW variable directly.
For that you need to use a procedure with the trigger references resource, which means the procedure is able to act as part of the trigger itself.
Below is my example, which I ran with dbaccess against Informix v11.70.
As far as I remember, this resource is only available in version 11 and later of the engine.
create table teste ( Name NVARCHAR(100), Type INT , Plan NVARCHAR(100) );
Table created.
create procedure check_name_values()
referencing new as n for teste ;;
define check_type integer ;;
define check_plan NVARCHAR ;;
if upper(n.name) like 'F%' then
let n.type = 1;;
let n.plan = "Test";;
end if
end procedure ;
Routine created.
;
create trigger trg_tablename_ins
insert on teste
referencing new as new
for each row
(
execute procedure check_name_values() with trigger references
);
Trigger created.
insert into teste values ('cesar',99,'myplan');
1 row(s) inserted.
insert into teste (name) values ('fernando');
1 row(s) inserted.
insert into teste values ('Fernando',100,'your plan');
1 row(s) inserted.
select * from teste ;
name cesar
type 99
plan myplan
name fernando
type 1
plan Test
name Fernando
type 1
plan Test
3 row(s) retrieved.
drop table if exists teste;
Table dropped.
drop procedure if exists check_name_values;
Routine dropped.
create trigger trg_tablename_ins
insert on tablename
referencing new as new
for each row
(
execute procedure check_name_values
(
new.name,
new.type,
new.plan
)
);
create procedure check_name_values
(
    name NVARCHAR,
    new_type integer,
    new_plan NVARCHAR
)
define check_type integer ;
define check_plan NVARCHAR ;
let check_type = 1;
let check_plan = "Test";
if name like 'F%'
then
    insert into tablename (name,type,plan) values (name,check_type,check_plan);
else
    insert into tablename (name,type,plan) values (name,new_type,new_plan);
end if ;
end procedure ;
Here is my version, an adaptation of an old example I found in the Informix Usenet group.
It is possible to update columns in a trigger statement, but it is not very straightforward: you have to use stored procedures and the INTO clause of the EXECUTE PROCEDURE/FUNCTION command.
It worked here for IBM Informix Dynamic Server Version 12.10.FC11WE.
drop table if exists my_table;
drop sequence if exists my_table_seq;
create table my_table (
    id INTEGER NOT NULL,
    col_a char(32) NOT NULL,
    col_b char(20) NOT NULL,
    hinweis char(64),
    uslu char(12) DEFAULT USER NOT NULL,
    dtlu DATETIME YEAR TO SECOND DEFAULT CURRENT YEAR TO SECOND NOT NULL
)
;
create sequence my_table_seq
increment 1
start 1;
drop procedure if exists get_user_datetime();
create function get_user_datetime() returning char(12),datetime year to second;
return user, current year to second;
end function
;
drop trigger if exists ti_my_table;
create trigger ti_my_table insert on my_table referencing new as n for each row (
execute function get_user_datetime() into uslu, dtlu
)
;
drop trigger if exists tu_my_table;
create trigger tu_my_table update on my_table referencing new as n for each row (
execute function get_user_datetime() into uslu, dtlu
)
;
insert into my_table values (my_table_seq.nextval, "a", "b", null, "witz", mdy(1,1,1900)) ;
SELECT *
FROM my_table
WHERE 1=1
;

mass inserting into model in rails, how to auto increment id field?

I have a model for stocks and a model for stock_price_history.
I want to mass insert with this
sqlstatement = "INSERT INTO stock_histories SELECT datapoint1 AS id,
datapoint2 AS `date` ...UNION SELECT datapoint9,10,11,12,13,14,15,16,
UNION SELECT datapoint 17... etc"
ActiveRecord::Base.connection.execute sqlstatement
However, I don't actually want to use datapoint1 AS id. If I leave it blank I get an error that my model has 10 fields and I'm inserting only 9 and that it is missing the primary key.
Is there a way to force an auto increment on the id when inserting by SQL?
Edit: Bonus question because I'm a noob. I am developing in SQLite3 and deploying to Postgres (i.e. Heroku). Will I need to modify the above mass insert statement for a Postgres database?
2nd edit: my initial question had Assets and AssetHistory instead of Stocks and Stock_Histories. I changed it to Stocks / Stock price histories because I thought it was more intuitive to understand, which is why some answers refer to Asset Histories.
You can change your SQL and be more explicit about which fields you're inserting, and leave id out of the list:
insert into asset_histories (date) select datapoint2 as `date` ...etc
Here's a long real example:
jim=# create table test1 (id serial not null, date date not null, name text not null);
NOTICE: CREATE TABLE will create implicit sequence "test1_id_seq" for serial column "test1.id"
CREATE TABLE
jim=# create table test2 (id serial not null, date date not null, name text not null);
NOTICE: CREATE TABLE will create implicit sequence "test2_id_seq" for serial column "test2.id"
CREATE TABLE
jim=# insert into test1 (date, name) values (now(), 'jim');
INSERT 0 1
jim=# insert into test1 (date, name) values (now(), 'joe');
INSERT 0 1
jim=# insert into test1 (date, name) values (now(), 'bob');
INSERT 0 1
jim=# select * from test1;
id | date | name
----+------------+------
1 | 2013-03-14 | jim
2 | 2013-03-14 | joe
3 | 2013-03-14 | bob
(3 rows)
jim=# insert into test2 (date, name) select date, name from test1 where name <> 'jim';
INSERT 0 2
jim=# select * from test2;
id | date | name
----+------------+------
1 | 2013-03-14 | joe
2 | 2013-03-14 | bob
(2 rows)
As you can see, only the selected rows were inserted, and they were assigned new id values in table test2. You'll have to be explicit about all the fields you want to insert, and ensure that the ordering of the insert and the select match.
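Applied to the question's UNION-style statement, that means naming every column except id. A hedged sketch (the price column is hypothetical, since the real column list was elided):
INSERT INTO stock_histories (date, price)
SELECT '2013-03-14', 10.50
UNION ALL
SELECT '2013-03-15', 10.75;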
Having said all that, you might want to look into the activerecord-import gem, which makes this sort of thing a lot more Railsy. Assuming you have a bunch of new AssetHistory objects (not persisted yet), you could insert them all with:
asset_histories = []
asset_histories << AssetHistory.new date: some_date
asset_histories << AssetHistory.new date: some_other_date
AssetHistory.import asset_histories
That will generate a single efficient insert into the table, and handle the id for you. You'll still need to query some data and construct the objects, which may not be faster than doing it all with raw SQL, but may be a better alternative if you've already got the data in Ruby objects.
