
Uly.me

cloud engineer


volume

AWS Create Volume From Snapshot with Tags

January 19, 2023

Here’s another script that creates a volume from a snapshot, but this one also copies the tags from the source volume to the new volume.

#!/bin/bash
read -p "server     : " server
read -p "volumeId   : " volume
read -p "snapshotId : " snapshot
read -p "region     : " region
read -p "zone       : " zone
read -p "profile    : " profile
# get tags
tags1=$(aws ec2 describe-volumes --volume-ids $volume --query 'Volumes[].Tags[]' --region $region --profile $profile)
# remove quotes
tags2=$(echo "$tags1" | tr -d '"')
# remove spaces
tags3=$(echo $tags2 | sed 's/ //g')
# replace : with =
tags4=$(echo $tags3 | sed 's/:/=/g')
# if empty value replace with quotes
tags5=$(echo $tags4 | sed 's/Value=}/Value=""}/g')
# create volume
aws ec2 create-volume \
--availability-zone $zone \
--encrypted \
--iops 3000 \
--volume-type gp3 \
--snapshot-id $snapshot \
--tag-specifications "ResourceType=volume,Tags=$tags5" \
--region $region \
--profile $profile
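
Once the new volume is created, it’s worth confirming the tags came across. A check along these lines reuses the same describe-volumes call the script starts with (the volume ID, region, and profile here are placeholders):

aws ec2 describe-volumes --volume-ids vol-0123456789abcdef0 \
--query 'Volumes[].Tags[]' --region us-east-1 --profile default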


Filed Under: Cloud, Linux Tagged With: aws, create, snapshot, tags, volume

AWS Create Volume From Snapshot

January 18, 2023

Here’s a bash script that creates a volume from a snapshot in AWS.

#!/bin/bash
read -p "snapshotId : " snapshot
read -p "server     : " server
read -p "tag1       : " tag1
read -p "tag2       : " tag2
read -p "region     : " region
read -p "zone       : " zone
read -p "profile    : " profile
aws ec2 create-volume \
--availability-zone $zone \
--encrypted \
--iops 3000 \
--volume-type gp3 \
--snapshot-id $snapshot \
--tag-specifications "ResourceType=volume,Tags=[{Key=Name,Value="$server"},{Key=tag1,Value="$tag1"},{Key=tag2,Value="$tag2"}]" \
--region $region \
--profile $profile
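
A sample run might look like this (all values are made up); on success the CLI prints the JSON description of the new volume:

snapshotId : snap-0123456789abcdef0
server     : web01
tag1       : production
tag2       : backup
region     : us-east-1
zone       : us-east-1a
profile    : default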


Filed Under: Cloud, Linux Tagged With: bash, create, script, snapshot, volume

GCP Extend ext4 Boot Volume

July 28, 2022

Here’s how to extend an ext4 boot volume.

gcloud compute disks resize DISK_NAME --size DISK_SIZE --zone ZONE --project PROJECTID


Then grow the partition and resize the file system. In this example, / is on /dev/sda3.

growpart /dev/sda 3
resize2fs /dev/sda3
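
To confirm the extra space is actually visible, check the partition and the root file system afterwards (output will vary by machine):

lsblk /dev/sda
df -h /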


Filed Under: Cloud, Linux Tagged With: boot, ext4, extend, gcloud, gcp, growpart, resize2fs, volume

Setup LVM on a VM

February 5, 2022

How to set up Logical Volume Manager (LVM) on a VM.

Install LVM.

yum install lvm2


Check the available disks. We are going to run LVM on /dev/sdb.

$ lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk 
├─sda1   8:1    0  200M  0 part /boot/efi
└─sda2   8:2    0 19.8G  0 part /
sdb      8:16   0   20G  0 disk


Create a physical volume on /dev/sdb.

$ pvcreate /dev/sdb
  Physical volume "/dev/sdb" successfully created.
$ pvs
  PV         VG Fmt  Attr PSize   PFree
  /dev/sdb   vg lvm2 a--  <20.00g    0


Create a volume group called vg.

$ vgcreate vg /dev/sdb
  Volume group "vg" successfully created
$ vgs
  VG #PV #LV #SN Attr   VSize   VFree  
  vg   1   0   0 wz--n- <20.00g <20.00g


Create a 10GB logical volume called data in the vg volume group.

$ lvcreate -L 10G -n data vg
  Logical volume "data" created.
$ lvs
  LV   VG Attr       LSize  Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  data vg -wi-a----- 10.00g


Format the logical volume and mount it.

$ mkfs.xfs /dev/vg/data
meta-data=/dev/vg/data           isize=512    agcount=4, agsize=655360 blks
         =                       sectsz=4096  attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1
data     =                       bsize=4096   blocks=2621440, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=4096  sunit=1 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
$ mount /dev/vg/data /mnt
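
The mount above does not survive a reboot. If you want it to persist, one option is an /etc/fstab entry along these lines (same device, mount point, and default options used here):

echo '/dev/vg/data /mnt xfs defaults 0 0' >> /etc/fstab
mount -a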


Check your logical volume. It shows 10GB.

$ df -Th
Filesystem          Type      Size  Used Avail Use% Mounted on
devtmpfs            devtmpfs  385M     0  385M   0% /dev
tmpfs               tmpfs     403M     0  403M   0% /dev/shm
tmpfs               tmpfs     403M  5.5M  398M   2% /run
tmpfs               tmpfs     403M     0  403M   0% /sys/fs/cgroup
/dev/sda2           xfs        20G  2.9G   17G  15% /
/dev/sda1           vfat      200M  5.8M  195M   3% /boot/efi
tmpfs               tmpfs      81M     0   81M   0% /run/user/1000
/dev/mapper/vg-data xfs        10G  104M  9.9G   2% /mnt


Let’s now extend the logical volume to 20GB.

$ lvextend -l +100%FREE /dev/vg/data
  Size of logical volume vg/data changed from 10.00 GiB (2560 extents) to <20.00 GiB (5119 extents).
  Logical volume vg/data successfully resized.


Although lsblk now shows the logical volume at 20GB, the file system mounted at /mnt still shows 10GB.

$ lsblk
NAME      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda         8:0    0   20G  0 disk 
├─sda1      8:1    0  200M  0 part /boot/efi
└─sda2      8:2    0 19.8G  0 part /
sdb         8:16   0   20G  0 disk 
└─vg-data 253:0    0   20G  0 lvm  /mnt
$ df -Th
Filesystem          Type      Size  Used Avail Use% Mounted on
devtmpfs            devtmpfs  385M     0  385M   0% /dev
tmpfs               tmpfs     403M     0  403M   0% /dev/shm
tmpfs               tmpfs     403M  5.5M  398M   2% /run
tmpfs               tmpfs     403M     0  403M   0% /sys/fs/cgroup
/dev/sda2           xfs        20G  2.9G   17G  15% /
/dev/sda1           vfat      200M  5.8M  195M   3% /boot/efi
tmpfs               tmpfs      81M     0   81M   0% /run/user/1000
/dev/mapper/vg-data xfs        10G  104M  9.9G   2% /mnt


We need to grow the file system.

$ xfs_growfs /dev/vg/data
meta-data=/dev/mapper/vg-data    isize=512    agcount=4, agsize=655360 blks
         =                       sectsz=4096  attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1
data     =                       bsize=4096   blocks=2621440, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=4096  sunit=1 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 2621440 to 5241856


Let’s check again.

$ df -Th
Filesystem          Type      Size  Used Avail Use% Mounted on
devtmpfs            devtmpfs  385M     0  385M   0% /dev
tmpfs               tmpfs     403M     0  403M   0% /dev/shm
tmpfs               tmpfs     403M  5.5M  398M   2% /run
tmpfs               tmpfs     403M     0  403M   0% /sys/fs/cgroup
/dev/sda2           xfs        20G  2.9G   17G  15% /
/dev/sda1           vfat      200M  5.8M  195M   3% /boot/efi
tmpfs               tmpfs      81M     0   81M   0% /run/user/1000
/dev/mapper/vg-data xfs        20G  176M   20G   1% /mnt


It now says 20GB.

Filed Under: Linux Tagged With: create, group, logical, lsblk, lvm, mkfs, physical, volume, xfs_growfs

EBS volume stuck in CloudFormation

November 12, 2020

When running CloudFormation, all the resources are created without a problem, but the stack gets stuck creating or attaching a volume. CloudFormation then fails and initiates a rollback. This is the error I am getting.

Volume attachment between volume-id vol-xxxxxxxx and instance-id i-xxxxxxx at device /dev/xvda is attaching


This turned out to be a conflict on HVM EC2 instances, because /dev/sda1 is remapped to /dev/xvda, and my second drive was also mapped to /dev/xvda. The fix was simply to shift the device names to avoid the mapping conflict.

Here’s the original mapping.

Boot:   /dev/xvda
Device: /dev/xvda
Device: /dev/xvdb


Here’s the fix.

Boot:   /dev/xvda
Device: /dev/xvdb
Device: /dev/xvdc
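
To see which device names an instance already has attached, and catch this kind of collision before CloudFormation rolls back, a describe-instances query like this can help (the instance ID is a placeholder):

aws ec2 describe-instances --instance-ids i-0123456789abcdef0 \
--query 'Reservations[].Instances[].BlockDeviceMappings[].DeviceName'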


Filed Under: Cloud Tagged With: aws, cloudformation, conflict, drive, mapping, template, volume

GCP Identify Your Disks

September 16, 2020

Here’s how to identify your disks on the system to cross check with GCP Dashboard.

ls -l /dev/disk/by-id


Result:

[root@servername ~]# ls -l /dev/disk/by-id
total 0
lrwxrwxrwx. 1 root root  9 Sep 15 05:30 google-boot -> ../../sda
lrwxrwxrwx. 1 root root 10 Sep 15 05:30 google-boot-part1 -> ../../sda1
lrwxrwxrwx. 1 root root  9 Sep 15 05:30 google-persistent-disk-1 -> ../../sdb
lrwxrwxrwx. 1 root root  9 Sep 15 05:30 scsi-0Google_PersistentDisk_boot -> ../../sda
lrwxrwxrwx. 1 root root 10 Sep 15 05:30 scsi-0Google_PersistentDisk_boot-part1 -> ../../sda1
lrwxrwxrwx. 1 root root  9 Sep 15 05:30 scsi-0Google_PersistentDisk_persistent-disk-1 -> ../../sdb
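
The google-* names above line up with each disk’s deviceName in GCP. To compare against what the project shows, the instance’s attached disks can be listed with gcloud (instance name and zone here are placeholders):

gcloud compute instances describe servername --zone us-central1-a --format="yaml(disks)"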


Filed Under: Cloud Tagged With: device, disks, gcp, identify, name, volume

Encrypt Volume via Terraform

April 9, 2019

Here’s a Terraform script that encrypts an unencrypted EBS volume. It takes a snapshot of the volume, copies the snapshot with encryption enabled, and then creates an encrypted volume from that encrypted snapshot.

#
# Set Variables
#
variable "volume" {
  description = "The Volume to encrypt: vol-12345678901234567"
}
variable "region" {
  description = "The Region: us-east-2"
}
variable "az" {
  description = "The AZ: us-east-2a"
}
 
#
# Set Credentials
#
provider "aws" {
	access_key = "put-your-access-key-here"
	secret_key = "put-your-secret-key-here"
	region = "${var.region}"
}
 
/*
#
# Create Unencrypted Volume
#
resource "aws_ebs_volume" "unencrypted_volume" {
  availability_zone = "${var.az}"
  size              = 10
  tags = {
    Name = "Unencrypted_Volume"
  }
}
*/
 
#
# Create Unencrypted Snapshot
#
resource "aws_ebs_snapshot" "unencrypted_snapshot" {
  #volume_id = "${aws_ebs_volume.unencrypted_volume.id}"
  volume_id = "${var.volume}"
  tags = {
    Name = "Unencrypted_Snapshot"
  }
}
 
#
# Create Encrypted Snapshot
#
resource "aws_ebs_snapshot_copy" "encrypted_snapshot" {
  source_snapshot_id = "${aws_ebs_snapshot.unencrypted_snapshot.id}"
  source_region      = "${var.region}"
  encrypted = true
  tags = {
    Name = "Encrypted_Snapshot"
  }
}
 
#
# Created Encrypted Volume
#
resource "aws_ebs_volume" "encrypted_volume" {
  availability_zone = "${var.az}"
  snapshot_id = "${aws_ebs_snapshot_copy.encrypted_snapshot.id}"
  tags = {
    Name = "Encrypted_Volume"
  }
}
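
To run it, something like the following should do; the variable values are just the examples from the descriptions above, and Terraform will prompt for any that are missing:

terraform init
terraform apply -var volume=vol-12345678901234567 -var region=us-east-2 -var az=us-east-2a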


Filed Under: Linux Tagged With: encrypt, snapshot, terraform, volume

LVM Extend Physical Volume

March 14, 2019

The following are instructions on how to extend a disk volume with LVM.

# aws cli extend volume
aws ec2 modify-volume --region us-east-1 \
--volume-id vol-xxxxxxxxxxxx --size 15 \
--volume-type gp2
# check volumes before
lsblk
# extend via growpart
growpart /dev/xvdf 1
# check volumes after
lsblk
# resize physical volume
pvresize /dev/xvdf1
# check physical volume
pvscan
# extend logical volume
lvresize -l +100%FREE /dev/data/data
# check logical volume
lvscan
# check mounts
df -Th
# resize file system
resize2fs /dev/data/data
# or
xfs_growfs /dev/mapper/root
xfs_growfs /dev/xvda1
# check mounts again for new size
df -Th
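
One extra check that can save confusion: make sure the EBS modification has actually finished before growing the partition. Its state can be watched with describe-volumes-modifications (volume ID is a placeholder):

aws ec2 describe-volumes-modifications --volume-ids vol-xxxxxxxxxxxx --region us-east-1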


Filed Under: Cloud, Linux Tagged With: disk, extend, lvm, physical, volume


Copyright © 2023