AKS ARM template | Query - azure-aks

I am working on building an AKS cluster using an ARM template. I have a situation where, if I set the OS type parameter to Windows, the template should populate "windowsProfile", and if I choose Linux, it should populate "linuxProfile".
Here is the template with the Linux profile chosen. If I pass the OSType parameter the value Windows, how do I insert the windowsProfile here?
"name": "[parameters('AKSClustername')]",
"type": "Microsoft.ContainerService/managedClusters",
"apiVersion": "2021-05-01",
"location": "[parameters('Region')]",
"properties": {
"kubernetesVersion": "[parameters('kubernetesversion')]",
"dnsPrefix": "dnsprefix",
"agentPoolProfiles": [
{
"name": "[parameters('agentPoolName')]",
"count": "[parameters('nodeCount')]",
"vmSize": "[parameters('vmSize')]",
"osType": "[parameters('OSType')]",
"storageProfile": "ManagedDisks",
"enableAutoScaling": "[parameters('autoscalepools')]",
// "availabilityZones": "[if(equals(parameters('availabilityZones'), bool('true')), variables('AVZone'), json('null'))]"
"availabilityZones": "[if(equals(parameters('availabilityZones'), or('1', '2', '3')), variables('AVZone'), json('null'))]"
}
],
"linuxProfile": {
"adminUsername": "adminUserName",
"ssh": {
"publicKeys": [
{
"keyData": "keyData"
}
]
}
},

To be honest, this won't be very readable or maintainable with an ARM template.
I would suggest you have a look at Bicep.
It compiles to an ARM template but is much more readable. Using Bicep you would be able to do something like this:
//main.bicep
param AKSClustername string
param Region string
param kubernetesversion string
param agentPoolName string
param nodeCount int
param vmSize string
param OSType string
param autoscalepools bool

// Define common properties
var baseProperties = {
  kubernetesVersion: kubernetesversion
  dnsPrefix: 'dnsprefix'
  agentPoolProfiles: [
    {
      name: agentPoolName
      count: nodeCount
      vmSize: vmSize
      osType: OSType
      storageProfile: 'ManagedDisks'
      enableAutoScaling: autoscalepools
    }
  ]
}

// Add the OS-specific profile based on OSType
var propertiesWithOsProfile = union(baseProperties, OSType == 'Linux' ? {
  linuxProfile: {
    adminUsername: 'adminUserName'
    ssh: {
      publicKeys: [
        {
          keyData: 'keyData'
        }
      ]
    }
  }
} : {
  windowsProfile: {
    adminPassword: ''
    adminUsername: ''
    licenseType: 'Windows_Server'
  }
})

// Create the cluster
resource aks 'Microsoft.ContainerService/managedClusters@2021-05-01' = {
  name: AKSClustername
  location: Region
  properties: propertiesWithOsProfile
}
Both the Azure CLI and Azure PowerShell support Bicep.
If you do need to generate an ARM template, you can run this command:
az bicep build --file main.bicep
It will generate an ARM template for you.
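You can also deploy the Bicep file directly without converting it first. A rough sketch (the resource group name here is an assumption, and you will be prompted for any parameters without defaults):
# Azure CLI
az deployment group create --resource-group my-aks-rg --template-file main.bicep

# Azure PowerShell
New-AzResourceGroupDeployment -ResourceGroupName my-aks-rg -TemplateFile ./main.bicep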

Related

How to re-use platform parameter when building an electron application using `electron-forge make --platform=win32`?

We are building an electron desktop application for macos, linux, and windows.
Here is our electron-forge config:
// forge.config.js
const os = require('os')
const package = require('./package.json')
function getExtraResource() {
const p = os.platform()
switch (p) {
case 'darwin':
return ['./static/bin/pasteld-mac']
case 'linux':
return ['./static/bin/pasteld-linux']
case 'win32':
return ['./static/bin/pasteld-win.exe']
default:
throw new Error(
'forge.config.js error: your OS is not supported. Supported OS are: darwin, linux, win32',
)
}
}
function getIcon() {
const p = os.platform()
switch (p) {
case 'darwin':
return './static/icons/icon.icns'
case 'linux':
return './static/icons/icon.png'
case 'win32':
return './static/icons/icon.ico'
default:
throw new Error(
'forge.config.js error: your OS is not supported. Supported OS are: darwin, linux, win32',
)
}
}
module.exports = {
packagerConfig: {
name: package.productName,
executableName: package.name,
icon: getIcon(),
asar: true,
extraResource: getExtraResource(),
protocols: [
{
protocol: package.name,
name: package.name,
schemes: [package.protocolSchemes.native],
},
],
},
makers: [
{
name: '@electron-forge/maker-squirrel',
config: {
exe: `${package.name}.exe`,
setupIcon: './static/icons/icon.ico',
loadingGif: './static/icons/icon.gif',
iconUrl:
'https://raw.githubusercontent.com/pastelnetwork/pastel-electron-wallet/master/static/icons/icon.ico',
title: package.productName,
setupExe: `${package.productName} Setup - v${package.version}.exe`,
skipUpdateIcon: true,
},
},
{
name: '@electron-forge/maker-dmg',
config: {
icon: './static/icons/icon.icns',
name: package.productName,
},
},
{
name: '@electron-forge/maker-deb',
config: {
options: {
icon: './static/icons/icon.png',
},
},
},
],
plugins: [
[
'@electron-forge/plugin-webpack',
{
mainConfig: './webpack.main.config.js',
renderer: {
config: './webpack.renderer.config.js',
entryPoints: [
{
html: './src/index.html',
js: './src/renderer.tsx',
name: 'main_window',
},
],
},
},
],
],
}
As you can see in the above file, getExtraResource() detects the OS type and picks the right executable file based on it. In other words, running yarn make on the proper platform is all we need to build the application.
However, we are now going to build the Windows installer on a Linux Wine image, more specifically using the electronuserland/builder:wine-mono image.
Everything is working as expected so far, except for one thing: we still need to add a case to the switch clause in getExtraResource() so that it picks the Windows executable in the builder image instead of the Linux executable (note that the builder image is still a Linux image!).
It will be something like this:
// forge.config.js
//...
function getExtraResource() {
const p = os.platform()
switch (p) {
case 'darwin':
return ['./static/bin/pasteld-mac']
case 'linux':
if (build_arg === 'win32') {
return ['./static/bin/pasteld-win.exe']
}
return ['./static/bin/pasteld-linux']
case 'win32':
return ['./static/bin/pasteld-win.exe']
default:
throw new Error(
'forge.config.js error: your OS is not supported. Supported OS are: darwin, linux, win32',
)
}
}
//...
How can I get the build_arg in the above file?
Build command is yarn make --platform=win32 in the wine builder image.
Thanks in advance!
Electron Forge supports hooks, and some of them get passed the platform and arch, which you could save globally.
Currently the earliest hook that gets passed these is packageAfterCopy, which might be called too late for your usage, but it is worth a try:
  plugins: [
    // ...
  ],
  hooks: {
    packageAfterCopy: async (
      forgeConfig,
      buildPath,
      electronVersion,
      platform,
      arch
    ) => {
      console.log(buildPath, electronVersion, platform, arch);
    },
  },
}
I've also opened a PR to add these parameters to the generateAssets hook.
We could resolve this by using process.argv.
More specifically, we run this command to build the Windows installer in the Linux container:
yarn make --platform=win32
The string win32 can then be read from process.argv[3] anywhere.
See the detailed implementation here.
Please advise if you have a better solution!
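For reference, a rough sketch of how the platform flag could be read inside forge.config.js (this assumes the exact yarn make --platform=win32 invocation above; the parsing helper is illustrative and not taken from the linked implementation):
// forge.config.js (sketch)
const os = require('os')

// Reads 'win32' from e.g. `yarn make --platform=win32`; falls back to the host OS.
// Scanning argv for the flag is safer than hard-coding process.argv[3].
function getTargetPlatform() {
  const flag = process.argv.find((arg) => arg.startsWith('--platform='))
  return flag ? flag.split('=')[1] : os.platform()
}

function getExtraResource() {
  switch (getTargetPlatform()) {
    case 'darwin':
      return ['./static/bin/pasteld-mac']
    case 'linux':
      return ['./static/bin/pasteld-linux']
    case 'win32':
      return ['./static/bin/pasteld-win.exe']
    default:
      throw new Error('forge.config.js error: unsupported platform')
  }
}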

Error using EFS in ECS, returns unknown filesystem type 'efs'

I'm using a Docker image for Jenkins (jenkins/jenkins:2.277.1-lts-alpine) in AWS ECS, and I want to persist the data using AWS EFS.
I created the EFS and got the ID (fs-7dcef848)
My terraform code looks like:
resource "aws_ecs_service" "jenkinsService" {
cluster = var.ECS_cluster
name = var.jenkins_name
task_definition = aws_ecs_task_definition.jenkinsService.arn
deployment_maximum_percent = "200"
deployment_minimum_healthy_percent = 50
desired_count = var.service_desired_count
tags = {
"ManagedBy" : "Terraform"
}
}
resource "aws_ecs_task_definition" "jenkinsService" {
family = "${var.jenkins_name}-task"
container_definitions = file("task-definitions/service.json")
volume {
name = var.EFS_name
efs_volume_configuration {
file_system_id = "fs-7dcef848"
}
}
tags = {
"ManagedBy" : "Terraform"
}
}
and the service.json
[
    {
        "name": "DevOps-jenkins",
        "image": "jenkins/jenkins:2.284-alpine",
        "cpu": 0,
        "memoryReservation": 1024,
        "essential": true,
        "portMappings": [
            {
                "containerPort": 8080,
                "hostPort": 80
            }
        ],
        "mountPoints": [
            {
                "sourceVolume": "DevOps-Jenkins",
                "containerPath": "/var/jenkins_home"
            }
        ]
    }
]
The terraform apply works OK, but the task cannot start, returning:
Stopped reason Error response from daemon: create ecs-DevOps-jenkins-task-33-DevOps-Jekins-bcb381cd9dd0f7ae2700: VolumeDriver.Create: mounting volume failed: mount: unknown filesystem type 'efs'
Does anyone know what's happening?
Is there another way to persist the data?
Thanks in advance.
Solved: the first attempt was to install the "amazon-efs-utils" package using a remote-exec provisioner.
But following the indications provided by @Oguzhan Aygun, I did it in the USER DATA section and it worked!
Thanks!
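For reference, the user data change amounts to installing the EFS mount helper on the ECS container instances; a minimal sketch for an Amazon Linux 2 instance (the cluster name below is an assumption):
#!/bin/bash
# Install the EFS mount helper so the instance can mount 'efs' volumes
yum install -y amazon-efs-utils
# Join the ECS cluster (name is an assumption)
echo "ECS_CLUSTER=my-ecs-cluster" >> /etc/ecs/ecs.config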

How to order portable storage using SoftLayer API

Is there a simple method to order portable storage given an input datacenter such as WDC06 and a size such as 500 GB?
At the moment the method I know of is painful, complex, and manual if I have to do this in a new datacenter: first get the configuration through Product_Package and then go through a long list of items to find the right product id, itemId, etc. This call also requires that I know the pkgId beforehand.
categories = client['Product_Package'].getConfiguration(id=pkgId, mask='isRequired, itemCategory.id, itemCategory.name, itemCategory.categoryCode')
Please share some code samples if this ordering process can be simplified.
I have no idea how you are ordering the portable storage, but you need to use the placeOrder method and get the proper prices for the disk size that you want to order. This literature can help you understand how to make orders:
https://sldn.softlayer.com/blog/cmporter/location-based-pricing-and-you
https://sldn.softlayer.com/blog/bpotter/going-further-softlayer-api-python-client-part-3
The process of picking the correct prices is hard, but you can use object filters to get them:
https://sldn.softlayer.com/article/object-filters
And here is a sample using the SoftLayer Python client:
import sys

import SoftLayer

# Your SoftLayer API username and key.
API_USERNAME = 'set me'
API_KEY = 'set me'

datacenter = "wdc06"  # lower case
size = "500"  # the size of the disk
diskDescription = "optional value"

client = SoftLayer.Client(username=API_USERNAME, api_key=API_KEY)
package = 198  # this package is always the same

# Using a filter to get the price for a specific disk size
# in a specific datacenter
filter = {
    "itemPrices": {
        "pricingLocationGroup": {
            "locations": {
                "name": {
                    "operation": datacenter
                }
            }
        },
        "item": {
            "capacity": {
                "operation": size
            }
        }
    }
}
price = client['SoftLayer_Product_Package'].getItemPrices(id=package, filter=filter)

# In case the request does not return any price we will look for the standard price
if not price:
    filter = {
        "itemPrices": {
            "locationGroupId": {
                "operation": "is null"
            },
            "item": {
                "capacity": {
                    "operation": size
                }
            }
        }
    }
    price = client['SoftLayer_Product_Package'].getItemPrices(id=package, filter=filter)

if not price:
    print("there is no price for the selected datacenter %s and disk size %s" % (datacenter, size))
    sys.exit(0)

# getting the locationId for the order template
filter = {
    "regions": {
        "location": {
            "location": {
                "name": {
                    "operation": datacenter
                }
            }
        }
    }
}
location = client['SoftLayer_Product_Package'].getRegions(id=package, filter=filter)

# now we are creating the order template
orderTemplate = {
    "complexType": "SoftLayer_Container_Product_Order_Virtual_Disk_Image",
    "packageId": package,
    "location": location[0]["location"]["location"]["id"],
    "prices": [{"id": price[0]["id"]}],
    "diskDescription": diskDescription
}

# When you are ready to order, change "verifyOrder" to "placeOrder"
order = client['SoftLayer_Product_Order'].verifyOrder(orderTemplate)
print(order)

Access single object in model using Metalsmith and swig

I have a JSON data file with multiple named objects in it:
{
    "berlin" : {
        "location": "Berlin",
        "folder": "berlin-2016"
    },
    "seattle" : {
        "location": "Seattle ",
        "folder": "seattle-2016"
    }
}
In my content file I would like to specify which object in the model to use and then refer to that in swig. Something like this:
---
model:
conference: conferences['berlin']
---
{{ model.conference.location }}
Is this possible?
That's definitely possible with Metalsmith. I don't have a complete picture of your build process, but for this solution you'll have to use the Metalsmith JavaScript API:
./data.json
{
    "berlin" : {
        "location": "Berlin",
        "folder": "berlin-2016"
    },
    "seattle" : {
        "location": "Seattle ",
        "folder": "seattle-2016"
    }
}
./build.js
// Dependencies
var metalsmith = require('metalsmith');
var layouts = require('metalsmith-layouts');

// Import metadata
var metadata = require('./data.json');

// Build
metalsmith(__dirname)
  // Make data available
  .metadata(metadata)
  // Process templates
  .use(layouts('swig'))
  // Build site
  .build(function(err){
    if (err) throw err;
  });
Then run node build.js in your project root folder to build. In your templates the data from data.json will then be available as {{ berlin.location }}.
You can also do this without the JavaScript API (which I don't recommend, because you lose some flexibility), in which case you would use a plugin (for example: metalsmith-json).
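If you specifically want each content file's front matter to select one object (as in the model.conference example from the question), one option is a small custom plugin that resolves the key before templating. A rough sketch (the front-matter key name and the inline plugin are assumptions, not a published Metalsmith plugin):
// ./build.js (sketch)
// Front matter in a content file:
//   ---
//   conference: berlin
//   ---
// The inline plugin copies conferences[file.conference] onto the file,
// so the template can use {{ conference.location }}.
var metalsmith = require('metalsmith');
var layouts = require('metalsmith-layouts');
var conferences = require('./data.json');

metalsmith(__dirname)
  .metadata({ conferences: conferences })
  .use(function (files, ms, done) {
    Object.keys(files).forEach(function (path) {
      var file = files[path];
      if (typeof file.conference === 'string') {
        file.conference = conferences[file.conference];
      }
    });
    done();
  })
  .use(layouts('swig'))
  .build(function (err) {
    if (err) throw err;
  });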

How to make elasticsearch add the timestamp field to every document in all indices?

Elasticsearch experts,
I have been unable to find a simple way to just tell ElasticSearch to insert the _timestamp field for all the documents that are added in all the indices (and all document types).
I see an example for specific types:
http://www.elasticsearch.org/guide/reference/mapping/timestamp-field/
and also see an example for all indices for a specific type (using _all):
http://www.elasticsearch.org/guide/reference/api/admin-indices-put-mapping/
but I am unable to find any documentation on adding it by default for all documents that get added irrespective of the index and type.
Elasticsearch used to support automatically adding timestamps to documents being indexed, but deprecated this feature in 2.0.0
From the version 5.5 documentation:
The _timestamp and _ttl fields were deprecated and are now removed. As a replacement for _timestamp, you should populate a regular date field with the current timestamp on application side.
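A minimal sketch of that application-side approach with the Python client (index and field names here are assumptions):
from datetime import datetime, timezone

from elasticsearch import Elasticsearch

es = Elasticsearch(hosts=["localhost"])

doc = {
    "message": "hello",
    # populate a regular date field with the current timestamp on the application side
    "created_at": datetime.now(timezone.utc).isoformat(),
}
es.index(index="myindex", body=doc)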
You can do this by providing it when creating your index.
$ curl -XPOST localhost:9200/test -d '{
    "settings" : {
        "number_of_shards" : 1
    },
    "mappings" : {
        "_default_": {
            "_timestamp" : {
                "enabled" : true,
                "store" : true
            }
        }
    }
}'
That will then automatically create a _timestamp for all stuff that you put in the index.
Then, after indexing something, the _timestamp field will be returned when you request it.
Adding another way to get an indexing timestamp. Hope this may help someone.
An ingest pipeline can be used to add a timestamp when a document is indexed. Here is a sample:
PUT _ingest/pipeline/indexed_at
{
  "description": "Adds indexed_at timestamp to documents",
  "processors": [
    {
      "set": {
        "field": "_source.indexed_at",
        "value": "{{_ingest.timestamp}}"
      }
    }
  ]
}
Earlier, Elasticsearch used named pipelines, which meant the 'pipeline' param had to be specified on the Elasticsearch endpoint used to write/index documents. (Ref: link) This was a bit troublesome, as you would need to make changes to the endpoints on the application side.
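For context, that per-request form looks roughly like this (the index name and document body here are assumptions):
PUT my-index/_doc/1?pipeline=indexed_at
{
  "message": "some document"
}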
With Elasticsearch version >= 6.5, you can now specify a default pipeline for an index using the index.default_pipeline setting. (Refer to the link for details)
Here is how to set the default pipeline:
PUT ms-test/_settings
{
"index.default_pipeline": "indexed_at"
}
I haven't tried it out yet, as I haven't upgraded to ES 6.5, but the above command should work.
You can make use of default index pipelines, leverage the script processor, and thus emulate the auto_now_add functionality you may know from Django and DEFAULT GETDATE() from SQL.
The process of adding a default yyyy-MM-dd HH:mm:ss date goes like this:
1. Create the pipeline and specify which indices it'll be allowed to run on:
PUT _ingest/pipeline/auto_now_add
{
  "description": "Assigns the current date if not yet present and if the index name is whitelisted",
  "processors": [
    {
      "script": {
        "source": """
          // skip if not whitelisted
          if (![ "myindex",
                 "logs-index",
                 "..."
               ].contains(ctx['_index'])) { return; }

          // don't overwrite if present
          if (ctx['created_at'] != null) { return; }

          ctx['created_at'] = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date());
        """
      }
    }
  ]
}
Side note: the ingest processor's Painless script context is documented here.
2. Update the default_pipeline setting in all of your indices:
PUT _all/_settings
{
"index": {
"default_pipeline": "auto_now_add"
}
}
Side note: you can restrict the target indices using the multi-target syntax:
PUT myindex,logs-2021-*/_settings?allow_no_indices=true
{
"index": {
"default_pipeline": "auto_now_add"
}
}
3. Ingest a document to one of the configured indices:
PUT myindex/_doc/1
{
"abc": "def"
}
4. Verify that the date string has been added:
GET myindex/_search
An example for ElasticSearch 6.6.2 in Python 3:
from elasticsearch import Elasticsearch

es = Elasticsearch(hosts=["localhost"])

timestamp_pipeline_setting = {
    "description": "insert timestamp field for all documents",
    "processors": [
        {
            "set": {
                "field": "ingest_timestamp",
                "value": "{{_ingest.timestamp}}"
            }
        }
    ]
}
es.ingest.put_pipeline("timestamp_pipeline", timestamp_pipeline_setting)

conf = {
    "settings": {
        "number_of_shards": 2,
        "number_of_replicas": 1,
        "default_pipeline": "timestamp_pipeline"
    },
    "mappings": {
        "articles": {
            "dynamic": "false",
            "_source": {"enabled": "true"},
            "properties": {
                "title": {
                    "type": "text"
                },
                "content": {
                    "type": "text"
                }
            }
        }
    }
}

response = es.indices.create(
    index="articles_index",
    body=conf,
    ignore=400  # ignore 400 already exists code
)
print('\nresponse:', response)

doc = {
    'title': 'automatically adding a timestamp to documents',
    'content': 'prior to version 5 of Elasticsearch, documents had a metadata field called _timestamp. When enabled, this _timestamp was automatically added to every document. It would tell you the exact time a document had been indexed.',
}
res = es.index(index="articles_index", doc_type="articles", id=100001, body=doc)
print(res)

res = es.get(index="articles_index", doc_type="articles", id=100001)
print(res)
For ES 7.x, the example should work after removing the doc_type related parameters, as document types are no longer supported.
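For instance, on 7.x the index and get calls would look roughly like this (a sketch using the same index, id, and doc as above; the mapping would also drop the "articles" type level):
# Elasticsearch 7.x: no doc_type parameter
res = es.index(index="articles_index", id=100001, body=doc)
print(res)

res = es.get(index="articles_index", id=100001)
print(res)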
First create the index and the properties of the index, such as fields and datatypes, and then insert the data using the REST API.
Below is the way to create an index with the field properties. Execute the following in the Kibana console:
PUT /vfq-jenkins
{
  "mappings": {
    "properties": {
      "BUILD_NUMBER": { "type": "double" },
      "BUILD_ID": { "type": "double" },
      "JOB_NAME": { "type": "text" },
      "JOB_STATUS": { "type": "keyword" },
      "time": { "type": "date" }
    }
  }
}
The next step is to insert the data into that index:
curl -u elastic:changeme -X POST http://elasticsearch:9200/vfq-jenkins/_doc/?pretty \
  -H 'Content-Type: application/json' -d '{
    "BUILD_NUMBER": "83", "BUILD_ID": "83", "JOB_NAME": "OMS_LOG_ANA", "JOB_STATUS": "SUCCESS",
    "time": "2019-09-08T12:39:00" }'
