Terraform Windows Server 2016: adding and running scripts using WinRM - terraform-provider-azure

I need to copy a PowerShell script onto a Windows Server 2016 VM and then run it to install a specific program.
I'm only sharing the code for the Windows machine creation, since the rest of it is just default settings.
resource "azurerm_windows_virtual_machine" "vm-clt" {
count = local.VMInstance
name = "windows${count.index + 1}"
location = var.location
availability_set_id = azurerm_availability_set.avset.id
resource_group_name = var.resource_group_name
network_interface_ids = [element(azurerm_network_interface.nic_vm.*.id, count.index)]
size = "Standard_B1s"
admin_username = var.username
admin_password = var.adminpassword
enable_automatic_updates = "false"
provision_vm_agent = "true"
depends_on = [azurerm_network_interface.nic_vm]
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2016-Datacenter"
version = "latest"
}
os_disk {
name = lower("${local.prefixStorageName}${count.index + 1}")
caching = "ReadWrite"
storage_account_type = "Premium_LRS"
}
provisioner "file" {
source = "Agent.ps1"
destination = "C:/Windows/Temp/Agent.ps1"
connection {
type = "winrm"
port = "5985"
https = false
insecure = true
user = var.username
password = var.adminpassword
host = "${element(azurerm_public_ip.pip_vm.*.ip_address, count.index)}"
timeout = "5m"
}
}
provisioner "remote-exec" {
inline = [
"powershell.exe -ExecutionPolicy Bypass -File C:\\Windows\\Temp\\Agent.ps1 -APIKey 2b076e91c7xxxxxxxx4fd25a094dce"
]
connection {
type = "winrm"
user = var.username
password = var.adminpassword
host = "${element(azurerm_public_ip.pip_vm.*.ip_address, count.index)}"
timeout = "5m"
}
}
}
The error that I receive while deploying is:
timeout - last error: unknown error Post "http://XXX.XXX.XXX.XXX:5985/wsman": dial tcp XXX.XXX.XXX.XXX:5985: i/o timeout
NOTE: the error only appears during terraform apply.

You are missing some configuration in azurerm_windows_virtual_machine, such as winrm_listener and additional_unattend_content. I'm also not sure whether you have added an NSG rule for the WinRM port.
So, after making some changes, I tested with the script below:
provider "azurerm"{
features{}
}
variable "admin_username" {
type = string
default = "adminuser"
}
variable "admin_password" {
type = string
default = "Paassworrdd#12344"
}
data "azurerm_resource_group" "example" {
name = "ansumantest"
}
resource "azurerm_virtual_network" "example" {
name = "example-network"
address_space = ["10.0.0.0/16"]
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
}
resource "azurerm_subnet" "example" {
name = "internal"
resource_group_name = data.azurerm_resource_group.example.name
virtual_network_name = azurerm_virtual_network.example.name
address_prefixes = ["10.0.2.0/24"]
}
resource "azurerm_public_ip" "windows_pip" {
name = "examplevm-PIP"
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
allocation_method = "Static"
}
resource "azurerm_network_security_group" "windows_nsg" {
name = "exampleVM-NSG"
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
security_rule {
name = "RDP"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "3389"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "WinRM"
priority = 110
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "5985"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "HTTP"
priority = 120
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "80"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_subnet_network_security_group_association" "example" {
subnet_id = azurerm_subnet.example.id
network_security_group_id = azurerm_network_security_group.windows_nsg.id
}
resource "azurerm_network_interface" "example" {
name = "example-nic"
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.example.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.windows_pip.id
}
}
resource "azurerm_windows_virtual_machine" "vm_persistent" {
name = "vm-persistent"
resource_group_name = data.azurerm_resource_group.example.name
location = data.azurerm_resource_group.example.location
size = "Standard_D4_v3"
# Here my variables for User/Password
admin_username = var.admin_username
admin_password = var.admin_password
network_interface_ids = [azurerm_network_interface.example.id]
custom_data = "${filebase64("C:/Users/user/terraform/test/winrm.ps1")}"
provision_vm_agent = "true"
winrm_listener {
protocol = "Http"
}
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2016-Datacenter"
version = "latest"
}
additional_unattend_content {
setting = "AutoLogon"
content = "<AutoLogon><Password><Value>${var.admin_password}</Value></Password><Enabled>true</Enabled><LogonCount>1</LogonCount><Username>${var.admin_username}</Username></AutoLogon>"
}
additional_unattend_content {
setting = "FirstLogonCommands"
content = "${file("C:/Users/user/terraform/test/firstlogincommand.xml")}"
}
provisioner "remote-exec" {
connection {
host = azurerm_public_ip.windows_pip.ip_address
type = "winrm"
port = 5985
https = false
timeout = "5m"
user = var.admin_username
password = var.admin_password
}
inline = [
"powershell.exe -ExecutionPolicy Unrestricted -Command {Install-WindowsFeature -name Web-Server -IncludeManagementTools}",
]
}
}
firstlogincommand.xml file (the two FirstLogonCommands copy the custom data to C:\winrm.ps1 and execute it at first logon):
<FirstLogonCommands>
  <SynchronousCommand>
    <CommandLine>cmd /c "copy C:\AzureData\CustomData.bin C:\winrm.ps1"</CommandLine>
    <Description>Move the CustomData file to the working directory</Description>
    <Order>12</Order>
  </SynchronousCommand>
  <SynchronousCommand>
    <CommandLine>powershell.exe -sta -ExecutionPolicy Unrestricted -file C:\winrm.ps1</CommandLine>
    <Description>Execute the WinRM enabling script</Description>
    <Order>13</Order>
  </SynchronousCommand>
</FirstLogonCommands>
winrm.ps1
Write-Host "Delete any existing WinRM listeners"
winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null
winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null
Write-Host "Create a new WinRM listener and configure"
winrm create winrm/config/listener?Address=*+Transport=HTTP
winrm set winrm/config/winrs '#{MaxMemoryPerShellMB="0"}'
winrm set winrm/config '#{MaxTimeoutms="7200000"}'
winrm set winrm/config/service '#{AllowUnencrypted="true"}'
winrm set winrm/config/service '#{MaxConcurrentOperationsPerUser="12000"}'
winrm set winrm/config/service/auth '#{Basic="true"}'
winrm set winrm/config/client/auth '#{Basic="true"}'
Write-Host "Configure UAC to allow privilege elevation in remote shells"
$Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System'
$Setting = 'LocalAccountTokenFilterPolicy'
Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force
Write-Host "turn off PowerShell execution policy restrictions"
Set-ExecutionPolicy -ExecutionPolicy Unrestricted
Write-Host "Configure and restart the WinRM Service; Enable the required firewall exception"
Stop-Service -Name WinRM
Set-Service -Name WinRM -StartupType Automatic
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any
Start-Service -Name WinRM
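
Once the VM is up, it can help to sanity-check the listener from the machine running Terraform before (re)running terraform apply; if port 5985 is reachable, Test-WSMan returns the WSMan identity information (the IP below is a placeholder):

# Quick connectivity check from the workstation; replace with your VM's public IP
Test-WSMan -ComputerName XXX.XXX.XXX.XXX -Port 5985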
Output: (screenshot of the successful deployment omitted)

Related

NixOS create DB (mysql, mariadb)

I am trying to create a database during system configuration. I am attaching the contents of the configuration.nix file below. After running sudo nixos-rebuild switch, the mariadb service starts, and the database and the user with the given password are created. However, the database is empty, although I expect the contents of the main.sql file to be written to it.
configuration.nix
let
  statsConfig = {
    db = "test1";
    user = "stats";
    password = "1";
  };
in
{
  services.mysql = {
    package = pkgs.mariadb;
    enable = true;
    ensureDatabases = [
      statsConfig.db
    ];
    replication.role = "master";
    replication.slaveHost = "127.0.0.1";
    replication.masterUser = "${statsConfig.user}";
    replication.masterPassword = "${statsConfig.password}";
    initialDatabases = [{ name = "${statsConfig.db}"; schema = ./main.sql; }];
    initialScript = ./main.sql;
    ensureUsers = [
      {
        name = "${statsConfig.user}";
        ensurePermissions = {
          "${statsConfig.db}.*" = "ALL PRIVILEGES";
        };
      }
    ];
  };
  systemd.services.setdbpass = {
    description = "MySQL database password setup";
    wants = [ "mariadb.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      ExecStart = ''
        ${pkgs.mariadb}/bin/mysql -e "grant all privileges on ${statsConfig.db}.* to ${statsConfig.user}@localhost identified by '${statsConfig.password}';" ${statsConfig.db}
      '';
      User = "root";
      PermissionsStartOnly = true;
      RemainAfterExit = true;
    };
  };
}
main.sql
create table tests
( Id INTEGER NOT NULL,
Name VARCHAR(255) NOT NULL,
primary key(Id)
);
insert into tests values (1, 'a');
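
One thing worth knowing here: in the NixOS module, initialDatabases and initialScript are only applied when the MySQL data directory is initialized for the very first time; if mariadb has ever started on the machine before, both are silently skipped (ensureDatabases still creates the database, just empty). Setting both options to the same main.sql is also redundant. A minimal sketch of the relevant part, assuming a fresh data directory:

services.mysql = {
  enable = true;
  package = pkgs.mariadb;
  # Only runs when services.mysql.dataDir is initialized for the first time
  initialDatabases = [{ name = "test1"; schema = ./main.sql; }];
};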

AWS ECS Task Definition with terraform and efs - Unknown volume

I'm trying to persist container data, and for that I want to mount a volume. Here is my task definition, which throws an unknown-volume error even though the volume is declared:
aws_ecs_task_definition.wordpress: Creating... Error: ClientException: Unknown volume 'wordpress-volume'.
locals {
  username = jsondecode(data.aws_secretsmanager_secret_version.wordpress.secret_string)["username"]
  password = jsondecode(data.aws_secretsmanager_secret_version.wordpress.secret_string)["password"]
}

resource "aws_ecs_task_definition" "wordpress" {
  family = "wordpress"
  container_definitions = jsonencode([{
    name       = "wordpress"
    image      = "wordpress"
    essential  = true
    cpu        = 256
    memory     = 512
    entryPoint = ["sh", "-c"]
    command    = ["ls -la /var/www/html"]
    volumes = [{
      name = "wordpress-volume"
      efsVolumeConfiguration = {
        fileSystemId = aws_efs_file_system.wordpress.id
      }
    }]
    mountPoints = [{
      sourceVolume  = "wordpress-volume"
      containerPath = "/var/www/html"
      readOnly      = false
    }]
    environment = [
      {
        name  = "WORDPRESS_DB_HOST"
        value = "127.0.0.1"
      },
      {
        name  = "WORDPRESS_DB_USER"
        value = local.username
      },
      {
        name  = "WORDPRESS_DB_PASSWORD"
        value = local.password
      },
      {
        name  = "WORDPRESS_DB_NAME"
        value = "wordpressdb"
      }
    ]
    portMappings = [{
      protocol      = "tcp"
      containerPort = 80
      hostPort      = 80
    }]
  }])
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  execution_role_arn       = aws_iam_role.ecs_task_execution_role.arn
  task_role_arn            = aws_iam_role.ecs_task_role.arn
  cpu                      = 1024
  memory                   = 3072
}
Your volumes definition isn't supposed to be inside container_definitions; it is part of the aws_ecs_task_definition resource arguments.
So you should move this part out:
volumes = [{
  name = "wordpress-volume"
  efsVolumeConfiguration = {
    fileSystemId = aws_efs_file_system.wordpress.id
  }
}]
to
resource "aws_ecs_task_definition" "wordpress" {
...
volume {
name = "wordpress-volume"
efs_volume_configuration {
file_system_id = aws_efs_file_system.wordpress.id
}
}
...
}
see the docs:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_task_definition#volume-block-arguments
https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TaskDefinition.html
And container definition docs:
https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html
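Putting the two pieces together, the corrected resource would look roughly like this (keeping the names from the question; note that on Fargate, EFS volumes also require platform version 1.4.0 or later):

resource "aws_ecs_task_definition" "wordpress" {
  family                   = "wordpress"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  cpu                      = 1024
  memory                   = 3072
  execution_role_arn       = aws_iam_role.ecs_task_execution_role.arn
  task_role_arn            = aws_iam_role.ecs_task_role.arn

  # The volume is declared at the task level...
  volume {
    name = "wordpress-volume"
    efs_volume_configuration {
      file_system_id = aws_efs_file_system.wordpress.id
    }
  }

  # ...and the container only references it by name via mountPoints.
  container_definitions = jsonencode([{
    name      = "wordpress"
    image     = "wordpress"
    essential = true
    mountPoints = [{
      sourceVolume  = "wordpress-volume"
      containerPath = "/var/www/html"
      readOnly      = false
    }]
    portMappings = [{
      protocol      = "tcp"
      containerPort = 80
      hostPort      = 80
    }]
  }])
}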

F# Akkling Unable to send message through sharding proxy

When I try to send a message to the akka.net region proxy with the following code,
open Akkling.Cluster.Sharding
open Akka.Actor
open Akka.Cluster
open Akka.Cluster.Sharding
open System
open Akkling

let configWithPort (port:int) =
    let config = Configuration.parse ("""
        akka {
            actor {
                provider = cluster
            }
            remote {
                dot-netty.tcp {
                    public-hostname = "localhost"
                    hostname = "localhost"
                    port = """ + port.ToString() + """
                }
            }
            cluster {
                roles = ["Worker"]
                sharding {
                    journal-plugin-id = "akka.persistence.journal.inmem"
                    snapshot-plugin-id = "akka.persistence.snapshot-store.inmem"
                }
                seed-nodes = [ "akka.tcp://cluster-system@localhost:5000" ]
            }
        }
        """)
    config
        .WithFallback(Akka.Cluster.Tools.Singleton.ClusterSingletonManager.DefaultConfig())
        .WithFallback(ClusterSharding.DefaultConfig())

let system1 = ActorSystem.Create("cluster-system", configWithPort 5000)
let system2 = ActorSystem.Create("cluster-system", configWithPort 5001)

/// Domain
type FileCommand = {
    ProgramId : string
    Duration : TimeSpan
    FilePath : string
}

/// Actors
let aggregateRootActor (mailbox:Actor<_>) (msg:FileCommand) =
    let nodeAddress = Cluster.Get(mailbox.System).SelfUniqueAddress
    logInfof mailbox "Program: [%s] with path [%s] on [%A]" msg.ProgramId msg.FilePath nodeAddress
    ignored ()

let extractorFunction (message:FileCommand) =
    let entityId = message.ProgramId
    let hash = entityId.GetHashCode()
    let numberOfShards = 5
    let shardId = sprintf "shard_%d" ((abs hash) % numberOfShards)
    shardId, entityId, message

let region1 = spawnSharded extractorFunction system1 "fileRouter" (props (actorOf2 aggregateRootActor))
let region2 = spawnSharded extractorFunction system2 "fileRouter" (props (actorOf2 aggregateRootActor))
let shardRegionProxy =
    spawnShardedProxy extractorFunction system1 "fileRouterProxy" None
Sending a message to the proxy always fails.
shardRegionProxy <! { ProgramId = "a"; Duration = TimeSpan.FromMinutes 10.; FilePath = "\\a_1.mp4" } //this failed
The error message is
> [INFO][8/26/2020 5:13:15 PM][Thread 0027][akka://cluster-system/system/sharding/fileRouterProxyCoordinator/singleton/coordinator] Message [RegisterProxy] from akka://cluster-system/system/sharding/fileRouterProxyProxy to akka://cluster-system/system/sharding/fileRouterProxyCoordinator/singleton/coordinator was not delivered. [6] dead letters encountered. If this is not an expected behavior then akka://cluster-system/system/sharding/fileRouterProxyCoordinator/singleton/coordinator may have terminated unexpectedly. This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' and 'akka.log-dead-letters-during-shutdown'.
However these sends are successful.
region1 <! { ProgramId = "d"; Duration = TimeSpan.FromMinutes 8.; FilePath = "\\a_2.mp4" }
region2 <! { ProgramId = "a"; Duration = TimeSpan.FromMinutes 10.; FilePath = "\\a_1.mp4" }
How do I correctly create the sharding coordinator?
Or, if this usage is incorrect, what is the problem with creating it like this?
The name was wrong. The proxy must be spawned with the same type name as the region ("fileRouter", not "fileRouterProxy"), and with the role of the nodes that host the region, so that it registers with the existing shard coordinator instead of the nonexistent "fileRouterProxyCoordinator" seen in the dead-letter log. Change the code like this and everything is fine:
let shardRegionProxy = spawnShardedProxy extractorFunction system1 "fileRouter" (Some "Worker")

Terraform referencing(getting) subnet_id not in current definition file

I am having trouble understanding how to reference a subnet that is not defined in the "current" definition file.
I have two distinct definition files: one for the network definition (file A) and another for computing resources (file B).
How can I reference the subnet defined in file A from file B?
File A contains the following:
resource "azurerm_resource_group" "NetResourceGroup" {
name = "NetworkResources"
location = "westeurope"
}
resource "azurerm_virtual_network" "vnet" {
name = "mainvnet"
location = "${azurerm_resource_group.NetResourceGroup.location}"
address_space = ["10.0.0.0/16"]
resource_group_name = "${azurerm_resource_group.NetResourceGroup.name}"
}
resource "azurerm_subnet" "dcsubnet" {
name = "dcsubnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.NetResourceGroup.name}"
address_prefix = "10.0.1.0/24"
}
resource "azurerm_subnet" "sqlsubnet" {
name = "sqlsubnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.NetResourceGroup.name}"
address_prefix = "10.0.2.0/24"
}
File B contains the following:
resource "azurerm_resource_group" "DCResourceGroup" {
name = "DomainControllerVMs"
location = "westeurope"
}
resource "azurerm_storage_account" "DCStorageAccoount" {
name = "domaincontrollersdisks"
location = "${azurerm_resource_group.DCResourceGroup.location}"
resource_group_name = "${azurerm_resource_group.DCResourceGroup.name}"
account_tier = "Standard"
account_replication_type = "LRS"
}
resource "azurerm_availability_set" "DCAvailabilitySet" {
name = "dcsavailset"
location = "${azurerm_resource_group.DCResourceGroup.location}"
resource_group_name = "${azurerm_resource_group.DCResourceGroup.name}"
platform_fault_domain_count = 2
platform_update_domain_count = 2
managed = true
}
resource "azurerm_network_interface" "dc1nic" {
name = "dcnic${count.index}"
location = "${azurerm_resource_group.DCResourceGroup.location}"
resource_group_name = "${azurerm_resource_group.DCResourceGroup.name}"
ip_configuration {
name = "ipconfig${count.index}"
subnet_id = "${?????????}"
private_ip_address_allocation = "Dynamic"
}
}
Thanks in advance
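
If both files are part of the same Terraform configuration (same directory and state), the subnet can be referenced directly as "${azurerm_subnet.dcsubnet.id}". If they are separate configurations, a common pattern is an azurerm_subnet data source lookup; a sketch, assuming the names from file A:

data "azurerm_subnet" "dcsubnet" {
  name                 = "dcsubnet"
  virtual_network_name = "mainvnet"
  resource_group_name  = "NetworkResources"
}

resource "azurerm_network_interface" "dc1nic" {
  # ...
  ip_configuration {
    name                          = "ipconfig${count.index}"
    subnet_id                     = "${data.azurerm_subnet.dcsubnet.id}"
    private_ip_address_allocation = "Dynamic"
  }
}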

ECS and Application Load Balancer not Registering Ephemeral Ports using Terraform

I am creating an application using Docker on ECS. I have the following Terraform file (concatenated for ease of reading):
resource "aws_ecs_cluster" "my-cluster" {
name = "my-cluster"
}
resource "aws_launch_configuration" "ecs" {
name = "ECS Cluster"
image_id = "ami-1c002379"
instance_type = "m4.xlarge"
security_groups = ["sg-4218de2a"]
iam_instance_profile = "${aws_iam_instance_profile.ecs.name}"
# TODO: is there a good way to make the key configurable sanely?
key_name = "my-key"
associate_public_ip_address = true
user_data = "#!/bin/bash\necho ECS_CLUSTER='${aws_ecs_cluster.my-cluster.name}' > /etc/ecs/ecs.config"
}
resource "aws_iam_role" "ecs_host_role" {
name = "ecs_host_role"
assume_role_policy = "${file("policies/ecs-role.json")}"
}
resource "aws_iam_role_policy" "ecs_instance_role_policy" {
name = "ecs_instance_role_policy"
policy = "${file("policies/ecs-instance-role-policy.json")}"
role = "${aws_iam_role.ecs_host_role.id}"
}
resource "aws_iam_policy_attachment" "ecs_for_ec2" {
name = "ecs-for-ec2"
roles = ["${aws_iam_role.ecs_host_role.id}"]
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_role" "ecs_service_role" {
name = "ecs_service_role"
assume_role_policy = "${file("policies/ecs-role.json")}"
}
resource "aws_iam_role_policy" "ecs_service_role_policy" {
name = "ecs_service_role_policy"
policy = "${file("policies/ecs-service-role-policy.json")}"
role = "${aws_iam_role.ecs_service_role.id}"
}
resource "aws_iam_instance_profile" "ecs" {
name = "ecs-instance-profile"
path = "/"
role = "${aws_iam_role.ecs_host_role.name}"
}
resource "aws_autoscaling_group" "ecs-cluster" {
availability_zones = ["us-east-2a", "us-east-2b"]
name = "ECS ${aws_ecs_cluster.my-cluster.name}"
min_size = "1"
max_size = "2"
desired_capacity = "1"
health_check_type = "EC2"
launch_configuration = "${aws_launch_configuration.ecs.name}"
vpc_zone_identifier = ["subnet-8e9abce7"]
}
resource "aws_alb" "front-end" {
name = "alb"
internal = false
security_groups = ["sg-4218de2a"]
subnets = ["subnet-8e9abce7", "subnet-e11d779a"]
enable_deletion_protection = true
}
resource "aws_alb_listener" "front_end" {
load_balancer_arn = "${aws_alb.front-end.arn}"
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_alb_target_group.fe-tg.arn}"
type = "forward"
}
}
resource "aws_alb_target_group" "fe-tg" {
name = "fe-tg"
port = 8080
protocol = "HTTP"
vpc_id = "vpc-22eeb84b"
health_check {
path = "/poc/healthy.html"
}
}
resource "aws_autoscaling_attachment" "asg_attachment_bar" {
autoscaling_group_name = "${aws_autoscaling_group.ecs-cluster.name}"
alb_target_group_arn = "${aws_alb_target_group.fe-tg.arn}"
}
resource "template_file" "task_container_definition" {
template = "${file("container-defintion.json.tpl")}"
vars {
aws_region = "${var.region}"
aws_account = "${var.account}"
image = "${var.image}"
tag = "${var.tag}"
}
}
resource "aws_ecs_task_definition" "my-td" {
family = "my-task"
container_definitions = "${template_file.task_container_definition.rendered}"
}
resource "aws_ecs_service" "poc" {
name = "poc-v4"
cluster = "${aws_ecs_cluster.my-cluster.name}"
task_definition = "${aws_ecs_task_definition.my-td.arn}"
desired_count = 3
iam_role = "${aws_iam_role.ecs_service_role.arn}"
depends_on = ["aws_iam_role_policy.ecs_service_role_policy", "aws_alb_listener.front_end"]
deployment_maximum_percent = 200
deployment_minimum_healthy_percent = 51
load_balancer {
target_group_arn = "${aws_alb_target_group.fe-tg.id}"
container_name = "greeter"
container_port = 0
}
placement_constraints {
type = "memberOf"
expression = "attribute:ecs.availability-zone in [us-east-2a, us-east-2b]"
}
placement_strategy {
type = "binpack"
field = "cpu"
}
}
Task Definition Template:
[{
  "environment": [],
  "name": "greeter",
  "mountPoints": [],
  "image": "${aws_account}.dkr.ecr.${aws_region}.amazonaws.com/${image}:${tag}",
  "cpu": 0,
  "portMappings": [
    {
      "containerPort": 8080,
      "hostPort": 0
    }
  ],
  "memory": 2048,
  "memoryReservation": 1024,
  "essential": true,
  "volumesFrom": []
}]
I am asking ECS to spin up at least 3 tasks within my service. But for some reason my Application Load Balancer isn't registering the ephemeral host ports for the health check; it's registering the actual Tomcat port (8080).
When I create the service by hand it works just fine, but using Terraform it doesn't. Does anything stick out?
Yes, I saw the setting. The aws_alb_listener resource only defines the default rule (the last, lowest-priority rule).
Please add an aws_alb_listener_rule resource; sample code for you:
resource "aws_alb_listener_rule" "static" {
listener_arn = "${aws_alb_listener.front_end.arn}"
priority = 100
action {
type = "forward"
target_group_arn = "${aws_alb_target_group.fe-tg.arn}"
}
condition {
field = "path-pattern"
values = ["/static/*"]
}
}
You can add more aws_alb_listener_rule resources with different priorities (100, 101, 102, ...).
With that, you should get the dynamic ports registered properly.
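
Worth double-checking as well: with dynamic host ports (hostPort 0 in the task definition), container_port in the service's load_balancer block is normally the container's port rather than 0, so ECS can register the dynamically assigned host port with the target group. A sketch, assuming the resources above:

load_balancer {
  target_group_arn = "${aws_alb_target_group.fe-tg.id}"
  container_name   = "greeter"
  container_port   = 8080 # the containerPort from the task definition; ECS maps it to the ephemeral host port
}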
