<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: Troy</title>
    <description>The latest articles on DEV Community by Troy (@dietertroy).</description>
    <link>https://dev.to/dietertroy</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F197461%2F89f529a6-0826-4bbb-ac95-54ae612247fe.png</url>
      <title>DEV Community: Troy</title>
      <link>https://dev.to/dietertroy</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/dietertroy"/>
    <language>en</language>
    <item>
      <title>AWS Certified Security: Specialty recap</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Fri, 11 Dec 2020 02:18:14 +0000</pubDate>
      <link>https://dev.to/dietertroy/aws-certified-security-specialty-recap-32jh</link>
      <guid>https://dev.to/dietertroy/aws-certified-security-specialty-recap-32jh</guid>
      <description>&lt;h1&gt;
  
  
  AWS Certified Security: Specialty
&lt;/h1&gt;

&lt;h2&gt;
  
  
  Overview
&lt;/h2&gt;

&lt;p&gt;The AWS Certified Security: Specialty is geared towards individuals who focus primarily on security within AWS. I found that as a solutions architect, it is extremely important to have security at the forefront of my mind while building. Building an environment securely from the start reduces the risk of failing compliance checks and of potential exploitation at production launch.&lt;/p&gt;

&lt;h2&gt;
  
  
  Exam components
&lt;/h2&gt;

&lt;p&gt;I found that an exam-taker should focus heavily on the following resources:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--7q8cVWw2--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-KMS_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#aws-kms"&gt;AWS KMS&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--m7cTSAfX--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-IAM_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#aws-iam"&gt;AWS IAM&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--AE55ekTl--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-EC2_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#amazon-ec2"&gt;Amazon EC2&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--6YM5qesH--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-CloudTrail_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#aws-cloudtrail"&gt;AWS Cloudtrail&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--QZD8HNFr--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-CloudWatch-Events_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#amazon-cloudwatch-events"&gt;Amazon CloudWatch Events&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--rrO5OKT0--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-Inspector_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#aws-inspector"&gt;AWS Inspector&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--WzF7dTsR--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-WAF_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#aws-waf"&gt;AWS WAF&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--0wTC7Tb8--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Shield_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#aws-shield"&gt;AWS Shield &amp;amp; Shield Advanced&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--KzsvA3i8--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Config_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#aws-config"&gt;AWS Config&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--jevgkep7--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Organizations_primary.png" alt="" width="25" height="25"&gt; &lt;a href="https://www.troydieter.com/post/awssec-guide/#aws-organizations"&gt;AWS Organizations&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Exam components (AWS Documentation)
&lt;/h2&gt;

&lt;h3&gt;
  
  
  AWS KMS
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--7q8cVWw2--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-KMS_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--7q8cVWw2--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-KMS_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;AWS Key Management Service&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/kms"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/kms"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/kms.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/kms/pricing"&gt;Pricing&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/kms/sla/"&gt;SLA&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/kms/faqs/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  AWS IAM
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--m7cTSAfX--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-IAM_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--m7cTSAfX--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-IAM_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;AWS Identity and Access Management (IAM)&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/iam"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/iam"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/iam-service.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/iam/faqs/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  Amazon EC2
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--AE55ekTl--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-EC2_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--AE55ekTl--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-EC2_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;Amazon Elastic Compute Cloud&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/ec2"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/ec2"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/ec2-service.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/ec2/pricing"&gt;Pricing&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/ec2/sla/"&gt;SLA&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/ec2/faqs/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  AWS CloudTrail
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--6YM5qesH--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-CloudTrail_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--6YM5qesH--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-CloudTrail_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;AWS Cloudtrail&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/cloudtrail"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/cloudtrail"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/ct.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/cloudtrail/pricing"&gt;Pricing&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/cloudtrail/sla/"&gt;SLA&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/cloudtrail/faqs/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  Amazon CloudWatch Events
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--QZD8HNFr--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-CloudWatch-Events_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--QZD8HNFr--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-CloudWatch-Events_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;Amazon CloudWatch Events&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/auth-and-access-control-cwe.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/cwe_region.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  Amazon Inspector
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--rrO5OKT0--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-Inspector_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--rrO5OKT0--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/Amazon-Inspector_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;Amazon Inspector&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/inspector"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/inspector"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/inspector/latest/userguide/security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/inspector.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/inspector/pricing"&gt;Pricing&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/inspector/sla/"&gt;SLA&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/inspector/faqs/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  AWS WAF
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--WzF7dTsR--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-WAF_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--WzF7dTsR--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-WAF_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;AWS Web Application Firewall&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/waf"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/waf"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/waf/latest/developerguide/security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/waf.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/waf/pricing"&gt;Pricing&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/waf/sla/"&gt;SLA&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/waf/faq/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  AWS Shield
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--0wTC7Tb8--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Shield_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--0wTC7Tb8--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Shield_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;AWS Shield&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/shield"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/shield"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/waf/latest/developerguide/shd-security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/shield.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/shield/pricing"&gt;Pricing&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/shield/sla/"&gt;SLA&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/shield/faqs/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  AWS Config
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--KzsvA3i8--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Config_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--KzsvA3i8--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Config_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;AWS Config&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/config"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/config"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/config/latest/developerguide/security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/awsconfig.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/config/pricing"&gt;Pricing&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/config/sla/"&gt;SLA&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/config/faq/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  AWS Organizations
&lt;/h3&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--jevgkep7--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Organizations_primary.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--jevgkep7--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/images/AWS-Organizations_primary.png" alt="" width="25" height="25"&gt;&lt;/a&gt; &lt;em&gt;AWS Organizations&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/organizations"&gt;Product documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/organizations"&gt;Development documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/organizations/latest/userguide/security.html"&gt;Security documentation&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://docs.aws.amazon.com/general/latest/gr/ao.html"&gt;Quotas (Limits)&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/organizations"&gt;Pricing&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;  &lt;a href="https://aws.amazon.com/organizations/faqs/"&gt;FAQ&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Training Resources
&lt;/h2&gt;

&lt;p&gt;The following training resources helped prepare for the above components:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="https://www.whizlabs.com/aws-certified-security-specialty/"&gt;WhizLabs - AWS Certified Security - Specialty Course&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://www.udemy.com/course/aws-certified-security-specialty/"&gt;uDemy AWS Certified Security - Specialty Course&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://aws.amazon.com/certification/certified-security-specialty/"&gt;Official AWS Exam Page&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Future Reading
&lt;/h2&gt;

&lt;p&gt;Please come join me &lt;a href="https://www.troydieter.com"&gt;over at my personal site&lt;/a&gt;, in which I cover varying AWS related topics and more!&lt;/p&gt;

</description>
      <category>aws</category>
      <category>security</category>
      <category>secops</category>
      <category>certified</category>
    </item>
    <item>
      <title>Using aws-auto-cleanup to keep an AWS test account neat &amp; tidy (and minimal cost!)</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Tue, 22 Sep 2020 01:56:36 +0000</pubDate>
      <link>https://dev.to/dietertroy/using-aws-auto-cleanup-to-keep-an-aws-test-account-neat-tidy-and-minimal-cost-3c87</link>
      <guid>https://dev.to/dietertroy/using-aws-auto-cleanup-to-keep-an-aws-test-account-neat-tidy-and-minimal-cost-3c87</guid>
      <description>&lt;h2&gt;
  
  
  aws-auto-cleanup
&lt;/h2&gt;

&lt;h3&gt;
  
  
  Using aws-auto-cleanup to keep an AWS test account neat &amp;amp; tidy (and minimal cost!)
&lt;/h3&gt;

&lt;h4&gt;
  
  
  Functional Requirements
&lt;/h4&gt;

&lt;ul&gt;
&lt;li&gt;Reduce operational run-time of resources used for testing/development, or for temporary spin-up of resources&lt;/li&gt;
&lt;li&gt;Reduce cost/operational expenses&lt;/li&gt;
&lt;li&gt;Ability to whitelist AWS resources that need to be retained&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  Operating Cost
&lt;/h4&gt;

&lt;p&gt;&amp;lt; $2.00/mo for the following:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;AWS::Events::Rule &lt;/li&gt;
&lt;li&gt;AWS::Lambda::Function &lt;/li&gt;
&lt;li&gt;AWS::Lambda::Permission&lt;/li&gt;
&lt;li&gt;AWS::Logs::LogGroup &lt;/li&gt;
&lt;li&gt;AWS::IAM::Role &lt;/li&gt;
&lt;li&gt;AWS::S3::Bucket &lt;/li&gt;
&lt;li&gt;AWS::S3::Bucket&lt;/li&gt;
&lt;li&gt;AWS::DynamoDB::Table &lt;/li&gt;
&lt;li&gt;AWS::DynamoDB::Table&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="table-wrapper-paragraph"&gt;&lt;table&gt;
&lt;thead&gt;
&lt;tr&gt;
&lt;th&gt;Node&lt;/th&gt;
&lt;th&gt;Type&lt;/th&gt;
&lt;th&gt;Unit&lt;/th&gt;
&lt;th&gt;Usage&lt;/th&gt;
&lt;th&gt;Total&lt;/th&gt;
&lt;/tr&gt;
&lt;/thead&gt;
&lt;tbody&gt;
&lt;tr&gt;
&lt;td&gt;auto-cleanup-settings-prod (DynamoDB-Table)&lt;/td&gt;
&lt;td&gt;provisioned read&lt;/td&gt;
&lt;td&gt;$0.00013 per hour for units of read capacity&lt;/td&gt;
&lt;td&gt;1 units per month&lt;/td&gt;
&lt;td&gt;$0.10&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;auto-cleanup-settings-prod (DynamoDB-Table)&lt;/td&gt;
&lt;td&gt;provisioned write&lt;/td&gt;
&lt;td&gt;$0.00065 per hour for units of write capacity&lt;/td&gt;
&lt;td&gt;1 units per month&lt;/td&gt;
&lt;td&gt;$0.48&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;auto-cleanup-settings-prod (DynamoDB-Table)&lt;/td&gt;
&lt;td&gt;storage&lt;/td&gt;
&lt;td&gt;$0.25 per GB-month&lt;/td&gt;
&lt;td&gt;1 GB&lt;/td&gt;
&lt;td&gt;$0.25&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;auto-cleanup-whitelist-prod (DynamoDB-Table)&lt;/td&gt;
&lt;td&gt;provisioned read&lt;/td&gt;
&lt;td&gt;$0.00013 per hour for units of read capacity&lt;/td&gt;
&lt;td&gt;1 units per month&lt;/td&gt;
&lt;td&gt;$0.10&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;auto-cleanup-whitelist-prod (DynamoDB-Table)&lt;/td&gt;
&lt;td&gt;provisioned write&lt;/td&gt;
&lt;td&gt;$0.00065 per hour for units of write capacity&lt;/td&gt;
&lt;td&gt;1 units per month&lt;/td&gt;
&lt;td&gt;$0.48&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;auto-cleanup-whitelist-prod (DynamoDB-Table)&lt;/td&gt;
&lt;td&gt;storage&lt;/td&gt;
&lt;td&gt;$0.25 per GB-month&lt;/td&gt;
&lt;td&gt;1 GB&lt;/td&gt;
&lt;td&gt;$0.25&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;auto-cleanup-prod (Lambda-Function)&lt;/td&gt;
&lt;td&gt;1000&lt;/td&gt;
&lt;td&gt;128MB * AvgTime * Invocations per month (Usage)&lt;/td&gt;
&lt;td&gt;10000 invocations&lt;/td&gt;
&lt;td&gt;$0.02&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;ServerlessDeploymentBucket (S3-Bucket)&lt;/td&gt;
&lt;td&gt;Standard&lt;/td&gt;
&lt;td&gt;$0.023 per GB - first 50 TB / month of storage used&lt;/td&gt;
&lt;td&gt;10 GB&lt;/td&gt;
&lt;td&gt;$0.23&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;auto-cleanup-prod-resourcetreebucket-troydieter (S3-Bucket)&lt;/td&gt;
&lt;td&gt;Standard&lt;/td&gt;
&lt;td&gt;$0.023 per GB - first 50 TB / month of storage used&lt;/td&gt;
&lt;td&gt;10 GB&lt;/td&gt;
&lt;td&gt;$0.23&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Dynamo Storage&lt;/td&gt;
&lt;td&gt;Discount&lt;/td&gt;
&lt;td&gt;First 25GB Free (-$0.25 per GB)&lt;/td&gt;
&lt;td&gt;-2&lt;/td&gt;
&lt;td&gt;($0.50)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Dynamo Backup&lt;/td&gt;
&lt;td&gt;Point In Time Recovery&lt;/td&gt;
&lt;td&gt;$0.20 per GB-month (Continuous backups)&lt;/td&gt;
&lt;td&gt;0&lt;/td&gt;
&lt;td&gt;$0.00&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Dynamo Backup&lt;/td&gt;
&lt;td&gt;OnDemand&lt;/td&gt;
&lt;td&gt;$0.10 per GB-month (On Demand)&lt;/td&gt;
&lt;td&gt;0&lt;/td&gt;
&lt;td&gt;$0.00&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Dynamo Network Outbound Traffic&lt;/td&gt;
&lt;td&gt;Transfer&lt;/td&gt;
&lt;td&gt;$0.09 per GB (Data Transfer Out)&lt;/td&gt;
&lt;td&gt;1&lt;/td&gt;
&lt;td&gt;$0.09&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Lambda Service&lt;/td&gt;
&lt;td&gt;Number of invocations&lt;/td&gt;
&lt;td&gt;Invocation call for a Lambda function&lt;/td&gt;
&lt;td&gt;10000&lt;/td&gt;
&lt;td&gt;$0.00&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;&lt;/div&gt;

&lt;h4&gt;
  
  
  Diagram
&lt;/h4&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fimages%2Faws_cleanup_diagram.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fimages%2Faws_cleanup_diagram.png" alt="diagram"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h4&gt;
  
  
  Stack
&lt;/h4&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;a href="https://serverless.com/" rel="noopener noreferrer"&gt;Serverless Stack&lt;/a&gt;
&lt;code&gt;Framework Core: 2.1.1
Plugin: 4.0.4
SDK: 2.3.2
Components: 3.1.3&lt;/code&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;a href="https://aws.amazon.com/cli/" rel="noopener noreferrer"&gt;AWS CLI&lt;/a&gt; &lt;/li&gt;
&lt;li&gt;&lt;a href="https://www.npmjs.com/get-npm" rel="noopener noreferrer"&gt;NPM&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;

&lt;h4&gt;
  
  
  Preferred configuration
&lt;/h4&gt;

&lt;p&gt;After you clone the aws-auto-cleanup repository in the next step, you will have the opportunity to change the parameters of the serverless-stack deployment. A few notable changes:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Change line 4 of the serverless.yml file to your company name or project name.&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Within the &lt;code&gt;auto_cleanup/data/auto-cleanup-settings.json&lt;/code&gt; file, you will find the default parameters for the function. I have (obviously) found that the TTL (time-to-live) of 7 days to be too low. I have raised that to 120 days. Example:&lt;/p&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;    "ttl": {
      "N": "120"
    }
&lt;/code&gt;&lt;/pre&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;h4&gt;
  
  
  Deploy
&lt;/h4&gt;

&lt;p&gt;Following the directions listed on the README, over &lt;a href="https://github.com/servian/aws-auto-cleanup" rel="noopener noreferrer"&gt;at the aws-auto-cleanup Github page!&lt;/a&gt;&lt;/p&gt;

&lt;h4&gt;
  
  
  Useful commands
&lt;/h4&gt;


&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;serverless deploy --region region-example --aws-profile profile-example&lt;br&gt;
serverless invoke --function AutoCleanup --region region-example --aws-profile profile-example --type Event&lt;br&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;
&lt;h4&gt;
&lt;br&gt;
  &lt;br&gt;
  &lt;br&gt;
  Outputs&lt;br&gt;
&lt;/h4&gt;

&lt;ul&gt;
&lt;li&gt;Under CloudWatch, you'll see under Logs &amp;gt; Log Groups the following log group (if you kept the default Lambda function name):&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;code&gt;/aws/lambda/auto-cleanup-prod&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;With a log stream of the latest Serverless invocation (or scheduled Lambda execution):&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;| 1600638849212 | [INFO] S3 Bucket&lt;br&gt;
'outbound-email-send-dev-serverlessdeploymentbucke-46346' was created&lt;br&gt;
234 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;br&gt;&lt;br&gt;
| | 1600638849212 | [INFO] S3 Bucket&lt;br&gt;
'outbound-email-send-dev-serverlessdeploymentbucke-46346' was created&lt;br&gt;
234 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;br&gt;&lt;br&gt;
| | 1600638849212 | [INFO] S3 Bucket&lt;br&gt;
'outbound-email-send-hand-serverlessdeploymentbuck-46346' was created&lt;br&gt;
234 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;br&gt;&lt;br&gt;
| | 1600638849212 | [INFO] S3 Bucket&lt;br&gt;
'outbound-email-send-hand-serverlessdeploymentbuck-46346' was created&lt;br&gt;
234 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;br&gt;&lt;br&gt;
| | 1600638849212 | [INFO] S3 Bucket&lt;br&gt;
'46346-ai-chat-bot-dev-serverlessdeploymentbucket-46346' was created&lt;br&gt;
230 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;br&gt;&lt;br&gt;
| | 1600638849268 | [INFO] S3 Bucket '46346-artifacts' was created 505&lt;br&gt;
days ago and has been deleted. (s3_cleanup.py, buckets(), line 168)&lt;br&gt;&lt;br&gt;
| | 1600638849269 | [INFO] S3 Bucket '46346-cpds-infra' was created&lt;br&gt;
494 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;br&gt;&lt;br&gt;
| | 1600638849269 | [INFO] S3 Bucket '46346-sree52-testbkt' was&lt;br&gt;
created 430 days ago and has been deleted. (s3_cleanup.py, buckets(),&lt;br&gt;
line 168)&lt;br&gt;&lt;br&gt;
| | 1600638849269 | [INFO] S3 Bucket 'python-myservice-bucket' was&lt;br&gt;
created 158 days ago and has been deleted. (s3_cleanup.py, buckets(),&lt;br&gt;
line 168)&lt;br&gt;&lt;br&gt;
| | 1600638849269 | [INFO] S3 Bucket 'python-poc-deployment-bucket'&lt;br&gt;
was created 166 days ago and has been deleted. (s3_cleanup.py,&lt;br&gt;
buckets(), line 168)&lt;br&gt;&lt;br&gt;
| | 1600638849269 | [INFO] S3 Bucket 's3-to-sns' was created 264 days&lt;br&gt;
ago and has been deleted. (s3_cleanup.py, buckets(), line 168)&lt;br&gt;&lt;br&gt;
| | 1600638849269 | [INFO] S3 Bucket&lt;br&gt;
'serverless-telegram-bot-serverlessdeploymentbuck-46346' was created&lt;br&gt;
236 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;br&gt;&lt;br&gt;
| | 1600638849269 | [INFO] S3 Bucket 'swagger-bucket-1' was created&lt;br&gt;
199 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;br&gt;&lt;br&gt;
| | 1600638849269 | [INFO] S3 Bucket 'terraform-bucket-lokesh' was&lt;br&gt;
created 243 days ago and has been deleted. (s3_cleanup.py, buckets(),&lt;br&gt;
line 168)&lt;br&gt;&lt;br&gt;
| | 1600638849270 | [INFO] S3 Bucket&lt;br&gt;
'twilio-voice-test-dev-serverlessdeploymentbucket-46346' was created&lt;br&gt;
230 days ago and has been deleted. (s3_cleanup.py, buckets(), line&lt;br&gt;
168)&lt;/p&gt;
&lt;/blockquote&gt;

&lt;h4&gt;
  
  
  Performing clean-up
&lt;/h4&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;Change the &lt;code&gt;dry_run&lt;/code&gt; item in the DynamoDB table (&lt;code&gt;auto-cleanup-settings-prod&lt;/code&gt; - if you kept the default settings name) to false to perform clean-up:&lt;/p&gt;

&lt;p&gt;&lt;code&gt;{  "dry_run" : { "BOOL" : false }}&lt;/code&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Invoke using:&lt;/li&gt;
&lt;/ol&gt;


&lt;/li&gt;

&lt;/ol&gt;

&lt;p&gt;&lt;code&gt;serverless invoke --function AutoCleanup --region region-example --aws-profile profile-example --type Event&lt;/code&gt;&lt;/p&gt;

&lt;h4&gt;
  
  
  Recap
&lt;/h4&gt;

&lt;p&gt;&lt;a href="https://github.com/servian/aws-auto-cleanup" rel="noopener noreferrer"&gt;aws-auto-cleanup&lt;/a&gt; helps to keep multi-region AWS accounts clean. Resources are either white-listed, or deleted after specified amount of days. This reduces monthly expenses and conflicting resources within the same region!&lt;/p&gt;

</description>
      <category>aws</category>
      <category>lambda</category>
      <category>dynamodb</category>
      <category>serverless</category>
    </item>
    <item>
      <title>sharedsecret - an AWS-native secret sharing service</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Tue, 04 Aug 2020 05:16:32 +0000</pubDate>
      <link>https://dev.to/dietertroy/sharedsecret-an-aws-native-secret-sharing-service-58n4</link>
      <guid>https://dev.to/dietertroy/sharedsecret-an-aws-native-secret-sharing-service-58n4</guid>
      <description>&lt;p&gt;&lt;strong&gt;Goal&lt;/strong&gt;&lt;br&gt;
Create an end-to-end fully encrypted, publicly accessible secret storage tool&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Components&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;AWS Code Commit (Ruby v2.7) - a fork of &lt;a href="https://github.com/onetimesecret/onetimesecret"&gt;OneTimeSecret&lt;/a&gt; with additional features, more developed UX &amp;amp; mobile capabilities.&lt;/li&gt;
&lt;li&gt;AWS Elastic Beanstalk (which includes EC2 instances deployed with boot-strap configurations defined in the beanstalk configuration)&lt;/li&gt;
&lt;li&gt;AWS S3 (Terraform Remote State, misc. components)&lt;/li&gt;
&lt;li&gt;AWS Elasticache (v3.2.10)&lt;/li&gt;
&lt;li&gt;AWS ELB (Application Load Balancer)&lt;/li&gt;
&lt;li&gt;AWS ACM (Amazon Certificate Manager)&lt;/li&gt;
&lt;li&gt;AWS Route 53 (DNS Management)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Deployment Model&lt;/strong&gt;&lt;br&gt;
The goal of the deployment is to deploy all initial platform components through Terraform. Due to the complexity/work-exerted model for Elastic Beanstalk, those components may be deployed manually.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Deployment Overview&lt;/strong&gt;&lt;br&gt;
&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--Gzbljlek--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://dev-to-uploads.s3.amazonaws.com/i/zoxnwxb0u1ehs5fhyfes.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--Gzbljlek--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://dev-to-uploads.s3.amazonaws.com/i/zoxnwxb0u1ehs5fhyfes.png" alt="secret_deployment"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Feedback&lt;/strong&gt;&lt;br&gt;
Looking for general feedback - not looking to monetize.&lt;/p&gt;

</description>
      <category>aws</category>
      <category>secret</category>
      <category>encryption</category>
      <category>share</category>
    </item>
    <item>
      <title>AWS Certified Big Data: Specialty study blueprint</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Tue, 31 Dec 2019 15:43:05 +0000</pubDate>
      <link>https://dev.to/dietertroy/aws-certified-big-data-specialty-study-blueprint-494g</link>
      <guid>https://dev.to/dietertroy/aws-certified-big-data-specialty-study-blueprint-494g</guid>
      <description>&lt;h1&gt;
  
  
  AWS Certified Big Data: Specialty study outline
&lt;/h1&gt;

&lt;p&gt;In another installment of study blueprints for AWS certification exams; I am happy to provide my suggested outline for what I used to pass the AWS Certified Big Data Specialty certification in December 2019. &lt;/p&gt;

&lt;p&gt;Deep-dive re:Invent videos:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="https://www.youtube.com/playlist?list=PLb5aX5hRnKxN-nDMIyECsudxGPrh67_RR"&gt;AWS re:Invent 2018: Big Data Analytics Architectural Patterns &amp;amp; Best Practices (ANT201-R1)&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://www.youtube.com/watch?v=v5lkNHib7bw&amp;amp;list=PLb5aX5hRnKxN-nDMIyECsudxGPrh67_RR&amp;amp;index=3&amp;amp;t=6s"&gt;AWS re:Invent 2018: Effective Data Lakes: Challenges and Design Patterns (ANT316)&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://www.youtube.com/watch?v=jKPlGznbfZ0&amp;amp;list=PLb5aX5hRnKxN-nDMIyECsudxGPrh67_RR&amp;amp;index=4&amp;amp;t=6s"&gt;AWS re:Invent 2018: High Performance Data Streaming with Amazon Kinesis: Best Practices (ANT322-R1)&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://www.youtube.com/watch?v=ISl9sTzxoSo&amp;amp;list=PLb5aX5hRnKxN-nDMIyECsudxGPrh67_RR&amp;amp;index=5&amp;amp;t=4s"&gt;AWS re:Invent 2018: [REPEAT 1] A Deep Dive into What's New with Amazon EMR (ANT340-R1)&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://www.youtube.com/watch?v=GgLKodmL5xE&amp;amp;list=PLb5aX5hRnKxN-nDMIyECsudxGPrh67_RR&amp;amp;index=7&amp;amp;t=4s"&gt;Amazon Redshift Masterclass&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;White Papers:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;  Big Data Analytics Options on AWS (January 2016)  &lt;a href="https://d1.awsstatic.com/whitepapers/Big_Data_Analytics_Options_on_AWS.pdf"&gt;PDF&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  Streaming Data Solutions on AWS with Amazon Kinesis (July 2017)  &lt;a href="https://d1.awsstatic.com/whitepapers/whitepaper-streaming-data-solutions-on-aws-with-amazon-kinesis.pdf"&gt;PDF&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  Data Warehousing on AWS (March 2016)  &lt;a href="https://d1.awsstatic.com/whitepapers/enterprise-data-warehousing-on-aws.pdf"&gt;PDF&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;  Best Practices for Amazon EMR (August 2013)  &lt;a href="https://d0.awsstatic.com/whitepapers/aws-amazon-emr-best-practices.pdf"&gt;PDF&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Study Guide:&lt;/p&gt;

&lt;h2&gt;
  
  
  EMR
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;EMR security :&lt;/strong&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Data at rest&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Data residing on &lt;a href="https://aws.amazon.com/s3"&gt;Amazon S3&lt;/a&gt;—S3 client-side encryption with EMR&lt;/li&gt;
&lt;li&gt;Data residing on disk—the Amazon EC2 instance store volumes (except boot volumes) and the attached Amazon EBS volumes of cluster instances are encrypted using Linux Unified Key System (LUKS)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Data in transit&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Data in transit from EMR to S3, or vice versa—S3 client-side encryption with EMR&lt;/li&gt;
&lt;li&gt;Data in transit between nodes in a cluster—in-transit encryption via Secure Sockets Layer (SSL) for MapReduce and Simple Authentication and Security Layer (SASL) for Spark shuffle encryption&lt;/li&gt;
&lt;li&gt;Data being spilled to disk or cached during a shuffle phase—Spark shuffle encryption or LUKS encryption&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;a href="https://aws.amazon.com/blogs/big-data/secure-amazon-emr-with-encryption/"&gt;https://aws.amazon.com/blogs/big-data/secure-amazon-emr-with-encryption/&lt;/a&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;EMR Consistent View:&lt;/strong&gt; EMRFS consistent view is an optional feature available when using Amazon EMR release version 3.2.1 or later. Consistent view allows EMR clusters to check for list and read-after-write consistency for Amazon S3 objects written by or synced with EMRFS. When you create a cluster with consistent view enabled, Amazon EMR uses an Amazon DynamoDB database to store object metadata and track consistency with Amazon S3. If consistent view determines that Amazon S3 is inconsistent during a file system operation, it retries that operation according to rules that you can define. By default, the DynamoDB database has 500 read capacity and 100 write capacity. You can configure read/write capacity settings depending on the number of objects that EMRFS tracks and the number of nodes concurrently using the metadata.&lt;/li&gt;
&lt;li&gt;Use Apache Zeppelin as a notebook for interactive data exploration. Apache Zeppelin is an open source GUI which creates interactive and collaborative notebooks for data exploration using Spark. You can use Scala, Python, SQL (using Spark SQL), or HiveQL to manipulate data and quickly visualize results.&lt;/li&gt;
&lt;li&gt;The Ganglia open source project is a scalable, distributed system designed to monitor clusters and grids while minimizing the impact on their performance. When you enable Ganglia on your cluster, you can generate reports and view the performance of the cluster as a whole, as well as inspect the performance of individual node instances. Ganglia is also configured to ingest and visualize Hadoop and Spark metrics.&lt;/li&gt;
&lt;li&gt;Apache Cassandra and &lt;a href="https://www.blogger.com/null"&gt;Apache HBase&lt;/a&gt; are columnar Database. Compare to Dynamo DB, Apache HBase is much more flexible in terms of what you can store (size and data type wise). Apache HBase gives you the option to have very flexible row key data types, whereas DynamoDB only allows scalar types for the primary key attributes. DynamoDB, on the other hand, provides very easy creation and maintenance of secondary indexes, something that you have to do manually in Apache HBase.&lt;/li&gt;
&lt;li&gt; In EMR, the default input format for a cluster is text files with each line separated by a newline (\n) character, which is the input format most commonly used. If your input data is in a format other than the default text files, you can use the Hadoop InputFormat interface to specify other input types.&lt;/li&gt;
&lt;li&gt;&lt;p&gt;If you are using Hive, you can use a serializer/deserializer (SerDe) to read data in from a given format into HDFS.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Hue (Hadoop User Experience) is an open-source, web-based, graphical user interface for use with Amazon EMR and Apache Hadoop. Hue groups together several different Hadoop ecosystem projects into a configurable interface. Hue acts as a front-end for applications that run on your cluster, allowing you to interact with applications using an interface that may be more familiar or user-friendly. The applications in Hue, such as the Hive and Pig editors, replace the need to log in to the cluster to run scripts interactively using each application's respective shell. After a cluster launches, you might interact entirely with applications using Hue or a similar interface.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Amazon EMR supports Apache Mahout, a machine learning framework for Apache Hadoop. Mahout is a machine learning library with tools for clustering, classification, and several types of recommenders, including tools to calculate most-similar items or build item recommendations for users. Mahout employs the Hadoop framework to distribute calculations across a cluster, and now includes additional work distribution methods, including Spark.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;You can run your EMR cluster as a transient process: one that launches the cluster, loads the input data, processes the data, stores the output results, and then automatically shuts down. This is the standard model for a cluster that performs a periodic processing task. Shutting down the cluster automatically ensures that you are only billed for the time required to process your data.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Spark has micro-batching but can guarantee exactly-once delivery if configured.  Spark's four modules are MLlib, SparkSQL, Spark Streaming, and GraphX.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;When your cluster runs, Hadoop creates a number of map and reduce tasks. These determine the number of tasks that can run simultaneously during your cluster. Run too few tasks and you have nodes sitting idle; run too many and there is significant framework overhead.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;If you want that data to be encrypted in-transit between nodes, then Hadoop encrypted shuffle has to be setup. Encrypted Shuffle capability allows encryption of the MapReduce shuffle using HTTPS. When you select the in-transit encryption checkbox in the EMR security configuration, Hadoop Encrypted Shuffle is automatically setup for you upon cluster launch&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Do not use Spark for batch processing. With Spark, there is minimal disk I/O, and the data being queried from the multiple data stores needs to fit into memory. Queries that require a lot of memory can fail. For large batch jobs, consider Hive. Also avoid using Spark for multi-user reporting with many concurrent requests.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  RedShift
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;When you create a table, you designate one of three distribution styles; EVEN, KEY, or ALL.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;strong&gt;Even distribution&lt;/strong&gt;&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;The leader node  &lt;strong&gt;distributes the rows across the slices in a round-robin fashion&lt;/strong&gt; , regardless of the values in any particular column.  &lt;strong&gt;EVEN distribution is appropriate when a table does not participate in joins&lt;/strong&gt; or when there is not a clear choice between KEY distribution and ALL distribution. &lt;strong&gt;EVEN distribution is the default distribution style.&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;&lt;strong&gt;Key distribution&lt;/strong&gt;  : The rows are  &lt;strong&gt;distributed according to the values in one column&lt;/strong&gt;. The leader node will attempt to place matching values on the same node slice.  &lt;strong&gt;If you distribute a pair of tables on the joining keys, the leader node collocates the rows on the slices according to the values in the joining&lt;/strong&gt;  columns so that matching values from the common columns are physically stored together.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;strong&gt;ALL distribution: A copy of the entire table is distributed to every node.&lt;/strong&gt; Where EVEN distribution or KEY distribution place only a portion of a table's rows on each node, ALL distribution ensures that every row is collocated for every join that the table participates in.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;ALL distribution multiplies the storage required by the number of nodes in the cluster, and so it takes much longer to load, update, or insert data into multiple tables. ALL  &lt;strong&gt;distribution is appropriate only for relatively slow-moving tables; that is, tables that are not updated frequently or extensively.&lt;/strong&gt;  Small dimension tables do not benefit significantly from ALL distribution, because the cost of redistribution is low.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;strong&gt;RedShift UNLOAD:&lt;/strong&gt;  Unloads the result of a query to one or more files on S3, using Amazon S3 server-side encryption (SSE-S3). You can also specify server-side encryption with an AWS Key Management Service key (SSE-KMS) or client-side encryption with a customer-managed key (CSE-CMK). You can manage the size of files on Amazon S3, and, by extension, the number of files, by setting the MAXFILESIZE parameter.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Number of the slice in Redshift Node&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;o   ds2.xlarge , dc1.large, dc2.large = 2&lt;/p&gt;

&lt;p&gt;o   ds2.8xlarge, dc2.8xlarge = 16&lt;/p&gt;

&lt;p&gt;o   dc1.8xlarge = 32&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;For Redshift COPY command, Split your load data files so that the files are about equal size, between 1 MB and 1 GB after compression. The number of files should be a multiple of the number of slices in your cluster.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;To validate the data in the Amazon S3 input files or Amazon DynamoDB table before you actually load the data in Redshift, use the NOLOAD option with the &lt;a href="https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html"&gt;COPY&lt;/a&gt; command. Use NOLOAD with the same COPY commands you use to actually load the data. NOLOAD checks the integrity of all of the data without loading it into the database. The NOLOAD option displays any errors in the same manner as would occur if you had attempted to load the data.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;There are two types of snapshots: &lt;em&gt;automated&lt;/em&gt; and &lt;em&gt;manual&lt;/em&gt;. Amazon Redshift stores these snapshots internally in Amazon S3 by using an encrypted Secure Sockets Layer (SSL) connection. If you need to restore from a snapshot, Amazon Redshift creates a new cluster and imports data from the snapshot that you specify.  Read more - &lt;a href="https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html"&gt;https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;You can use a manifest to ensure that the COPY command loads all of the required files, and only the required files, for a data load. Instead of supplying an object path for the COPY command, you supply the name of a JSON-formatted text file that explicitly lists the files to be loaded. The URL in the manifest must specify the bucket name and full object path for the file, not just a prefix. You can use a manifest to load files from different buckets or files that do not share the same prefix.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;strong&gt;RedshiftCopyActivity&lt;/strong&gt; : Copies data from DynamoDB or Amazon S3 to Amazon Redshift. You can load data into a new table, or easily merge data into an existing table. In addition, RedshiftCopyActivity lets you work with an S3DataNode, since it supports a manifest file.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In Redshift, You can efficiently add new data to an existing table by using a combination of updates and inserts from a staging table. While Amazon Redshift does not support a single &lt;em&gt;merge&lt;/em&gt;, or &lt;em&gt;upsert&lt;/em&gt;, command to update a table from a single data source, you can perform a merge operation by creating a staging table&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Redshift cluster resizing - &lt;a href="https://docs.aws.amazon.com/redshift/latest/mgmt/rs-resize-tutorial.html"&gt;https://docs.aws.amazon.com/redshift/latest/mgmt/rs-resize-tutorial.html&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;EXPLAIN command Displays the execution plan for a query statement without running the query in Redshift&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;By default, Amazon Redshift configures one queue with a &lt;em&gt;concurrency level&lt;/em&gt; of five, which enables up to five queries to run concurrently, plus one predefined Superuser queue, with a concurrency level of one. You can define up to eight queues. Each queue can be configured with a maximum concurrency level of 50. The maximum total concurrency level for all user-defined queues (not including the Superuser queue) is 50.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In Redshift , If you enable SQA, you can reduce or eliminate workload management (WLM) queues that are dedicated to running short queries. In addition, long-running queries don't need to contend with short queries for slots in a queue, so you can configure your WLM queues to use fewer query slots. When you use lower concurrency, query throughput is increased and overall system performance is improved for most workloads.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In RedShift , Short query acceleration (SQA) prioritizes selected short-running queries ahead of longer-running queries. SQA executes short-running queries in a dedicated space, so that SQA queries aren't forced to wait in queues behind longer queries. With SQA, short-running queries begin running more quickly and users see results sooner.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In Redshift The CASE expression is a conditional expression, similar to if/then/else statements found in other languages. CASE is used to specify a result when there are multiple conditions.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In Redshift , You enable encryption when you launch a cluster. To migrate from an unencrypted cluster to an encrypted cluster, you first unload your data from the existing, source cluster. Then you reload the data in a new, target cluster with the chosen encryption setting. During the migration process, your source cluster is available for read-only queries until the last step. The last step is to rename the target and source clusters, which switches endpoints so all traffic is routed to the new, target cluster&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Amazon Redshift logs information about connections and user activities in your database. These logs help you to monitor the database for security and troubleshooting purposes, for database auditing. The logs are stored in the S3 buckets for convenient access with data security features for users who are responsible for monitoring activities in the database.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;S3, DynamoDB, and EMR/EC2 instances directly integrate with Redshift using the COPY command.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;The UNLOAD ENCRYPTED command automatically stores the data encrypted using client side encryption and uses HTTPS to encrypt the data during the transfer to S3.If you want to ensure files are automatically encrypted on S3 with server-side encryption, no special action is needed. The unload command automatically creates files using Amazon S3 server-side encryption with AWS-managed encryption keys (SSE-S3).&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;For RedShift data, the bzip2 compression algorithm has the highest compression ratio.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;strong&gt;To manually migrate an Amazon Redshift cluster to another AWS account&lt;/strong&gt; , follow these steps:&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;a href="https://docs.aws.amazon.com/redshift/latest/mgmt/managing-snapshots-console.html#snapshot-create"&gt;Create a manual snapshot&lt;/a&gt; of the cluster you want to migrate.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;a href="https://docs.aws.amazon.com/redshift/latest/mgmt/managing-snapshots-console.html#snapshot-share"&gt;Share the cluster snapshot&lt;/a&gt; with another AWS account to view and restore the snapshot.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Before you copy a snapshot to another region, first &lt;a href="https://docs.aws.amazon.com/redshift/latest/mgmt/managing-snapshots-console.html#snapshot-crossregioncopy-configure"&gt;enable cross-region snapshots&lt;/a&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In the destination AWS account, &lt;a href="https://docs.aws.amazon.com/redshift/latest/mgmt/managing-snapshots-console.html#snapshot-restore"&gt;restore the shared cluster snapshot&lt;/a&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Kinesis
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;The unit of data stored by Kinesis Data Streams is a &lt;em&gt;data record&lt;/em&gt;. A &lt;em&gt;stream&lt;/em&gt; represents a group of data records. The data records in a stream are distributed into shards.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;A &lt;em&gt;shard&lt;/em&gt; has a sequence of data records in a stream. When you create a stream, you specify the number of shards for the stream. Each shard can support up to 5 transactions per second for reads, up to a maximum total data read rate of 2 MB per second. Shards also support up to 1,000 records per second for writes, up to a maximum total data write rate of 1 MB per second (including partition keys). The total capacity of a stream is the sum of the capacities of its shards. You can increase or decrease the number of shards in a stream as needed. However, you are charged on a per-shard basis.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;If you have sensitive data, you can enable server-side data encryption when you use Amazon Kinesis Data Firehose. However, this is only possible if you use a Kinesis stream as your data source. When you configure a Kinesis stream as the data source of a Kinesis Data Firehose delivery stream, Kinesis Data Firehose no longer stores the data at rest. Instead, the data is stored in the Kinesis stream.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;When you send data from your data producers to your Kinesis stream, the Kinesis Data Streams service encrypts your data using an AWS KMS key before storing it at rest. When your Kinesis Data Firehose delivery stream reads the data from your Kinesis stream, the Kinesis Data Streams service first decrypts the data and then sends it to Kinesis Data Firehose. Kinesis Data Firehose buffers the data in memory based on the buffering hints that you specify and then delivers it to your destinations without storing the unencrypted data at rest.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In Kinesis , To prevent skipped records, handle all exceptions within processRecords appropriately.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;For each Amazon Kinesis Data Streams application, the KCL uses a unique Amazon DynamoDB table to keep track of the application's state. Because the KCL uses the name of the Amazon Kinesis Data Streams application to create the name of the table, each application name must be unique.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;If your Amazon Kinesis Data Streams application receives provisioned-throughput exceptions, you should increase the provisioned throughput for the DynamoDB table. The KCL creates the table with a provisioned throughput of 10 reads per second and 10 writes per second, but this might not be sufficient for your application. For example, if your Amazon Kinesis Data Streams application does frequent checkpointing or operates on a stream that is composed of many shards, you might need more throughput.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record. Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Details regarding Kinesis records :&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt; &lt;a href="https://docs.aws.amazon.com/streams/latest/dev/kinesis-kpl-concepts.html"&gt;https://docs.aws.amazon.com/streams/latest/dev/kinesis-kpl-concepts.html&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;&lt;a href="https://docs.aws.amazon.com/streams/latest/dev/kinesis-record-processor-scaling.html"&gt;https://docs.aws.amazon.com/streams/latest/dev/kinesis-record-processor-scaling.html&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;
&lt;li&gt;&lt;p&gt;For live streaming Kinesis gets ruled out if record size greater than 1 MB , in that case Kafka can support bigger records.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;You can trigger One lambda per shard. If you want to use Lambda with Kinesis Streams, you need to create Lambda functions to automatically read batches of records off your Amazon Kinesis stream and process them if records are detected on the stream. AWS Lambda then polls the stream periodically (once per second) for new records.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In Kinesis Data Firehose, the PutRecordBatch() operation can take up to 500 records per call or 4 MB per call, whichever is smaller. Buffer size ranges from 1 MB to 128 MB.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In circumstances where data delivery to the destination is falling behind data ingestion into the delivery stream, Amazon Kinesis Firehose raises the buffer size automatically to catch up and make sure that all data is delivered to the destination.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;If data delivery to  Redshift fail from Kinesis Firehose , Amazon Kinesis Firehose retries data delivery every 5 minutes for up to a maximum period of 60 minutes. After 60 minutes, Amazon Kinesis Firehose skips the current batch of S3 objects that are ready for COPY and moves on to the next batch. The information about the skipped objects is delivered to your S3 bucket as a manifest file in the errors folder, which you can use for manual backfill. For information about how to COPY data manually with manifest files, see Using a Manifest to Specify Data Files.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;If data delivery to your Amazon S3 bucket fails , Amazon Kinesis Firehose retries to deliver data every 5 seconds for up to a maximum period of 24 hours. If the issue continues beyond the 24-hour maximum retention period, it discards the data.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Aggregation refers to the storage of multiple records in a Streams record. Aggregation allows customers to increase the number of records sent per API call, which effectively increases producer throughput. Aggregation means &lt;em&gt;storing multiple records within a single Kinesis Data Streams record&lt;/em&gt;, while Collection means using the API operation PutRecords to send multiple Kinesis Data Streams records to one or more shards in your Kinesis data stream. You can first aggregate stream records and then send them to the stream using the collection operation PutRecords() across multiple shards.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Spark Streaming uses the Kinesis Client Library (KCL) to consume data from a Kinesis stream. KCL handles complex tasks like load balancing, failure recovery, and check-pointing&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;The  &lt;strong&gt;Amazon Kinesis Connector Library&lt;/strong&gt;  helps Java developers integrate &lt;a href="http://aws.amazon.com/kinesis/"&gt;Amazon Kinesis&lt;/a&gt; with other AWS and non-AWS services. The current version of the library provides connectors for &lt;a href="http://aws.amazon.com/dynamodb/"&gt;Amazon DynamoDB&lt;/a&gt;, &lt;a href="http://aws.amazon.com/redshift/"&gt;Amazon Redshift&lt;/a&gt;, &lt;a href="http://aws.amazon.com/s3/"&gt;Amazon S3&lt;/a&gt;, &lt;a href="http://www.elasticsearch.org/"&gt;Elasticsearch&lt;/a&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Amazon ML
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;Amazon ML Key component:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;o   &lt;a href="https://docs.aws.amazon.com/machine-learning/latest/dg/amazon-machine-learning-key-concepts.html#datasources"&gt;Datasources&lt;/a&gt; contain metadata associated with data inputs to Amazon ML&lt;/p&gt;

&lt;p&gt;o   &lt;a href="https://docs.aws.amazon.com/machine-learning/latest/dg/amazon-machine-learning-key-concepts.html#ml-models"&gt;ML Models&lt;/a&gt; generate predictions using the patterns extracted from the input data&lt;/p&gt;

&lt;p&gt;o   &lt;a href="https://docs.aws.amazon.com/machine-learning/latest/dg/amazon-machine-learning-key-concepts.html#evaluations"&gt;Evaluations&lt;/a&gt; measure the quality of ML models&lt;/p&gt;

&lt;p&gt;o   Batch Predictions &lt;em&gt;asynchronously&lt;/em&gt; generate predictions for multiple input data observations&lt;/p&gt;

&lt;p&gt;o   &lt;a href="https://docs.aws.amazon.com/machine-learning/latest/dg/amazon-machine-learning-key-concepts.html#real-time-predictions"&gt;Real-time Predictions&lt;/a&gt; &lt;em&gt;synchronously&lt;/em&gt; generate predictions for individual data observations&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;Amazon ML supports three types of ML models: binary classification, multiclass (Categorial) classification, and regression. - &lt;a href="https://docs.aws.amazon.com/machine-learning/latest/dg/types-of-ml-models.html"&gt;https://docs.aws.amazon.com/machine-learning/latest/dg/types-of-ml-models.html&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Details on Amazon ML - &lt;a href="https://docs.aws.amazon.com/machine-learning/latest/dg/amazon-machine-learning-key-concepts.html"&gt;https://docs.aws.amazon.com/machine-learning/latest/dg/amazon-machine-learning-key-concepts.html&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="table-wrapper-paragraph"&gt;&lt;table&gt;
&lt;thead&gt;
&lt;tr&gt;
&lt;th&gt;AUC&lt;/th&gt;
&lt;th&gt;Area Under the Curve (AUC) measures the ability of a binary ML model to predict a higher score for positive examples as compared to negative examples.&lt;/th&gt;
&lt;/tr&gt;
&lt;/thead&gt;
&lt;tbody&gt;
&lt;tr&gt;
&lt;td&gt;Macro-averaged F1-score&lt;/td&gt;
&lt;td&gt;The macro-averaged F1-score is used to evaluate the predictive performance of multiclass ML models.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;RMSE&lt;/td&gt;
&lt;td&gt;The Root Mean Square Error (RMSE) is a metric used to evaluate the predictive performance of regression ML models.&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;&lt;/div&gt;

&lt;ul&gt;
&lt;li&gt;A lower Area Under Curve (AUC) reduces accuracy of the prediction; AUC values well below 0.5 may indicate a problem with the data. The F1 score's range is 0 to 1. A larger value indicates better predictive accuracy.&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Amazon ML is limited to 100 'categorical' recommendations. Amazon ML can support up to 100 GB of data.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;You must upload your input data to S3 because Amazon ML reads data from Amazon S3 locations. You can upload your data directly to Amazon S3, or Amazon ML can copy data that you've stored in Redshift or RDS into a .csv file and upload it to S3.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  ElasticSearch
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;ElasticSearch is suitable to analyze large set of streaming data from Kinesis.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Elastic Search Snapshots are backups of a cluster's data and state. They provide a convenient way to migrate data across Amazon Elasticsearch Service domains and recover from failure. The service supports restoring from snapshots taken on both Amazon ES domains and self-managed Elasticsearch clusters.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Amazon ES takes daily automated snapshots of the primary index shards in a domain, as described in &lt;a href="https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-snapshots"&gt;Configuring Automatic Snapshots&lt;/a&gt;. The service stores up to 14 of these snapshots for no more than 30 days in a preconfigured Amazon S3 bucket at no additional charge to you. You can use these snapshots to restore the domain.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  IoT and Others
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;For AWS IoT - Devices connect using your choice of identity (X.509 certificates, IAM users and groups, Amazon Cognito identities, or custom authentication tokens) over a secure connection according to the AWS IoT connection model.  Note that KMS is not in list.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;AWS IoT rule actions specify what to do when a rule is triggered. You can create actions for the following services: CloudWatch, DynamoDB, Elasticsearch, Kinesis Firehose, Kinesis Streams, Lambda, S3, SNS, and SQS. Not for any relational store like Aurora and Redshift.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;The Rules Engine transforms messages using a SQL-based syntax. The Device Gateway (Message Broker) uses topics to route messages from publishing clients to subscribing clients. When a message is published to the topic, the SQL statement is evaluated and the rule action is triggered, sending the message to another AWS service.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;In AWS Data Pipeline, a precondition is a pipeline component containing conditional statements that must be true before an activity can run. For example, a precondition can check whether source data is present before a pipeline activity attempts to copy it. AWS Data Pipeline provides several pre-packaged preconditions that accommodate common scenarios, such as whether a database table exists, whether an Amazon S3 key is present, and so on. However, preconditions are extensible and allow you to run your own custom scripts to support endless combinations.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;AWS Data Pipeline supports a JDBC database, an RDS database, and Redshift. The computational resources that perform the work are EC2 instances and EMR clusters.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Data Pipeline integrate with on-premise servers. AWS provides you with a Task Runner package that you install on your on-premise hosts. Once installed, the package polls Data Pipeline for work to perform. If it detects that an activity needs to run on your on-premise host (based on the schedule in Data Pipeline), the Task Runner will issue the appropriate command to run the activity, which can be running a stored procedure or a database dump or another database activity.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;A QuickSight Dashboard is a read-only snapshot of an analysis. You can share it with other Quicksight users for reporting. The data in the dashboard reflects the data set that is used by the analysis. If you share a dashboard with users, they can then view and filter the dashboard data, but they cannot save any filters applied to the dashboard.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  DynamoDB
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;Number of DynamoDB partitions = readCapacityUnits/3000 + writeCapacityUnits/1000. Each partition holds up to 10 GB of data; after that, a new partition is created.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Details on GSI Vs LSI in Dynamo DB - &lt;a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/SecondaryIndexes.html"&gt;https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/SecondaryIndexes.html&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;COPY command used for Loading Data from DynamoDB Into Amazon Redshift.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;DAX (&lt;em&gt;DynamoDB Accelerator&lt;/em&gt;) is a DynamoDB-compatible caching service that enables you to benefit from fast in-memory performance for demanding applications.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;A &lt;em&gt;VPC endpoint&lt;/em&gt; for DynamoDB and S3 enables Amazon EC2 instances in your VPC to use their private IP addresses to access DynamoDB/S3 with no exposure to the public Internet.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;DynamoDB Streams supports the following stream &lt;em&gt;record&lt;/em&gt; views:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;KEYS_ONLY—Only the key attributes of the modified item&lt;/li&gt;
&lt;li&gt;NEW_IMAGE—The entire item, as it appears after it was modified&lt;/li&gt;
&lt;li&gt;OLD_IMAGE—The entire item, as it appears before it was modified&lt;/li&gt;
&lt;li&gt;NEW_AND_OLD_IMAGES—Both the new and the old images of the item&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;
&lt;li&gt;&lt;p&gt;DynamoDB supports many different data types for attributes within a table. They can be categorized as follows:&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;o    &lt;strong&gt;Scalar Types&lt;/strong&gt;  – A scalar type can represent exactly one value. The scalar types are number, string, binary, Boolean, and null.&lt;/p&gt;

&lt;p&gt;o    &lt;strong&gt;Document Types&lt;/strong&gt;  – A document type can represent a complex structure with nested attributes—such as you would find in a JSON document. The document types are list and map.&lt;/p&gt;

&lt;p&gt;o    &lt;strong&gt;Set Types&lt;/strong&gt;  – A set type can represent multiple scalar values. The set types are string set, number set, and binary set.&lt;/p&gt;

</description>
      <category>aws</category>
      <category>certification</category>
      <category>bigdata</category>
      <category>emr</category>
    </item>
    <item>
      <title>AWS Certified Solutions Architect: Professional study guide</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Mon, 07 Oct 2019 22:25:42 +0000</pubDate>
      <link>https://dev.to/dietertroy/aws-certified-solutions-architect-professional-study-guide-24g6</link>
      <guid>https://dev.to/dietertroy/aws-certified-solutions-architect-professional-study-guide-24g6</guid>
      <description>&lt;p&gt;As a follow up to my previous post regarding &lt;a href="https://dev.to/dietertroy/aws-certified-solutions-architect-associate-study-guide-38c2"&gt;AWS Certified Solutions Architect: Associate&lt;/a&gt; study guide, I figured I'd provide an update.&lt;/p&gt;

&lt;p&gt;I &lt;strong&gt;passed&lt;/strong&gt; the Certified Solutions Architect: Professional! Here are my key-takeaways for those who want a TL;DR:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;It was long, like very long. It's definitely an exercise in quick comprehension and elimination.&lt;/li&gt;
&lt;li&gt;It was difficult, easily one of the hardest exams I've taken to date.&lt;/li&gt;
&lt;li&gt;Most of the exams posted online only replicate about 20% of what will actually be on the exam.&lt;/li&gt;
&lt;li&gt;Hands-on experience at this level is a MUST. Unlike the Architect: Associate, you must live &amp;amp; breathe Amazon Web Services.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;As a reference, here is the certification learning path for the AWS Certified Solutions Architect:&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--zTt1DEB7--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/static/aws-architect-path.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--zTt1DEB7--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/static/aws-architect-path.png" alt="cert path"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Here are the resources I used to study:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Jon Bonso's &lt;a href="https://www.udemy.com/course/aws-solutions-architect-professional-practice-exams-2018/"&gt;Solutions Architect Professional practice exams&lt;/a&gt; on uDemy&lt;/li&gt;
&lt;li&gt;All of my material from the previous post (see above for link)&lt;/li&gt;
&lt;li&gt;The &lt;a href="//exitcertified.com/training/aws/architect/aws-csap-workshop-45551-detail.html?event=121429"&gt;great folks over at ExitCertified&lt;/a&gt; for a full day boot camp (virtual). This covered exam tactics, question dissemination and was a great live classroom setting.&lt;/li&gt;
&lt;li&gt;
&lt;a href="https://www.youtube.com/playlist?list=PLWOdWLqFC9RiQs-GT4tkt4GcXeFJfkt20"&gt;re:Invent playlist videos&lt;/a&gt; regarding the topics on the exam.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Also, there are things that I subscribe to on a daily basis that you don't really think about. This helps me keep fresh on service updates, releases, etc:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Follow the Amazon Web Services Facebook &amp;amp; LinkedIn pages&lt;/li&gt;
&lt;li&gt;Follow the AWS Twitter handle along with finding those who work for AWS and tweet daily about their service.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Otherwise that sums it up, thanks for reading!&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--VlCAEgGA--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://fc03.deviantart.net/fs70/f/2011/127/6/d/the_more_you_nyan_by_wanderingkree-d3fsh1o.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--VlCAEgGA--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://fc03.deviantart.net/fs70/f/2011/127/6/d/the_more_you_nyan_by_wanderingkree-d3fsh1o.jpg" alt="the moar nyan noes"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
      <category>aws</category>
      <category>awscsa</category>
      <category>certified</category>
      <category>csa</category>
    </item>
    <item>
      <title>HashiCorp Vault+AWS Secrets+Rotation tool?</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Thu, 05 Sep 2019 20:06:54 +0000</pubDate>
      <link>https://dev.to/dietertroy/hashicorp-vault-aws-secrets-rotation-tool-eif</link>
      <guid>https://dev.to/dietertroy/hashicorp-vault-aws-secrets-rotation-tool-eif</guid>
      <description>&lt;p&gt;In the process of implementing HashiCorp Vault and utilizing the AWS secrets engine. We have LDAP set up as an auth method and users are able to curl the vault endpoint to pull credentials:&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;curl --request POST --data @payload.json https://vault-api.test.com/v1/auth/ldap/login/me | jq '.'

curl --header "X-Vault-Token: mytoken" --request GET https://vault-api.test.com/v1/aws/creds/grp-aws-r-usersrole | jq
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;Access keys are granted - does anyone know of a script/tool that's already in place that:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Securely creates the payload.json with your LDAP password&lt;/li&gt;
&lt;li&gt;CURL's the Vault endpoint with the payload&lt;/li&gt;
&lt;li&gt;Deletes the payload file&lt;/li&gt;
&lt;li&gt;Retrieves the token&lt;/li&gt;
&lt;li&gt;CURL's the Vault endpoint with the token to retrieve the ephemeral access keys&lt;/li&gt;
&lt;li&gt;Loads the access keys in the .aws/credentials store&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;I've used &lt;a href="https://github.com/Fullscreen/aws-rotate-key"&gt;https://github.com/Fullscreen/aws-rotate-key&lt;/a&gt; before we moved to Vault but it's no longer applicable.&lt;/p&gt;

&lt;p&gt;Should I write something and share?&lt;/p&gt;

</description>
      <category>vault</category>
      <category>aws</category>
      <category>secrets</category>
      <category>hashicorp</category>
    </item>
    <item>
      <title>AWS Certified Solutions Architect: Associate study guide</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Thu, 15 Aug 2019 15:06:37 +0000</pubDate>
      <link>https://dev.to/dietertroy/aws-certified-solutions-architect-associate-study-guide-38c2</link>
      <guid>https://dev.to/dietertroy/aws-certified-solutions-architect-associate-study-guide-38c2</guid>
      <description>&lt;p&gt;With scheduling my &lt;a href="https://aws.amazon.com/certification/certified-solutions-architect-professional/" rel="noopener noreferrer"&gt;AWS Certified Solutions Architect: Professional&lt;/a&gt; for late September 2019, I figured i'd finally compile all of the notes and gathered content for the &lt;a href="https://aws.amazon.com/certification/certified-solutions-architect-associate/" rel="noopener noreferrer"&gt;AWS Certified Solutions Architect: Associate&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;As a reference, here is the certification learning path for the AWS Certified Solutions Architect:&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Faws-architect-path.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Faws-architect-path.png" alt="cert path"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;General preliminary reminders:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Ensure you have hands-on experience with AWS prior to the exam&lt;/li&gt;
&lt;li&gt;The content below will likely change as AWS releases new services. This was up-to-date when I took the exam in &lt;code&gt;December 2018.&lt;/code&gt;
&lt;/li&gt;
&lt;li&gt;Some of the content is mixed up, still working to get it all properly organized.&lt;/li&gt;
&lt;li&gt;Portions of this has been piece-mealed from various sources. I bring it here to you, to help! :)&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  The Basics (101)
&lt;/h2&gt;

&lt;p&gt;&lt;em&gt;AWS Global Infrastructure&lt;/em&gt;&lt;br&gt;
&lt;em&gt;You will never be tested on numbers (e.g. number of regions/availability zones)&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Region&lt;/em&gt; &lt;br&gt;
    -&amp;gt; Geographical (Brazil, Europe, Asia, etc). Each region consists of two or more availability zones (AZ). They are not in the same physical space (if flooding occurs, another data center can still answer)&lt;br&gt;
Availability zone -&amp;gt; Data center&lt;/p&gt;

&lt;p&gt;&lt;em&gt;North America Regions:&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;US East (Northern Virginia)&lt;/li&gt;
&lt;li&gt;US East (Ohio) Region&lt;/li&gt;
&lt;li&gt;US West (Oregon) Region&lt;/li&gt;
&lt;li&gt;US West (Northern California) Region&lt;/li&gt;
&lt;li&gt;AWS GovCloud (US-West Region)&lt;/li&gt;
&lt;li&gt;Canada (Central)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;Edge Locations&lt;/em&gt; -&amp;gt; CDN (content delivery network). They add new ones all the times, over 100 so far. There are many more Edge Locations than Regions.&lt;br&gt;
Main services provided by Amazon. Based on late 2017/early 2018.&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Route53&lt;/em&gt;: DNS service&lt;/p&gt;

&lt;p&gt;&lt;em&gt;EC2&lt;/em&gt;: Virtual machines and/or compute&lt;/p&gt;

&lt;p&gt;&lt;em&gt;ECS&lt;/em&gt;: Virtual machines + docker&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Elastic Beanstalk&lt;/em&gt;: Deploy apps and don't worry about infrastructure. Good for starting users.&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Lambda&lt;/em&gt;: Serverless. Upload code, no need to configure any server/virtual machines. Used by Amazon Echo&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Lightsail&lt;/em&gt;: out of the box cloud. Virtual servers with fixed configs&lt;/p&gt;

&lt;h2&gt;
  
  
  STORAGE
&lt;/h2&gt;

&lt;p&gt;S3: Virtual disk in the cloud. Object based storage&lt;/p&gt;

&lt;p&gt;Glacier: Archive - low cost, but access is not immediate&lt;/p&gt;

&lt;p&gt;EFS: Elastic file service, mount disks without a specific size, automatically (elastically) grows&lt;/p&gt;

&lt;p&gt;Storage Gateway: VM in premise with S3 support.&lt;/p&gt;

&lt;p&gt;Cache Gateway: cache information from s3 in premise&lt;/p&gt;

&lt;h2&gt;
  
  
  DATABASES
&lt;/h2&gt;

&lt;p&gt;RDS: Mysql, MariaDB, Microsoft SQL, Oracle, Aurora, etc.&lt;/p&gt;

&lt;p&gt;DynamoDB: No relational database (NoSQL database)&lt;/p&gt;

&lt;p&gt;Red Shift: Data warehouse. Copy your own data to create reports&lt;/p&gt;

&lt;p&gt;Elasticache: Cache (can use two technologies: redis or memcached)&lt;/p&gt;

&lt;h2&gt;
  
  
  MIGRATION
&lt;/h2&gt;

&lt;p&gt;Snowball: Portable disk that Amazon sends you, you fill it and send it back, and they import it. Several flavors (depends on how much data you need to move)&lt;/p&gt;

&lt;p&gt;DMS: Database Migration Service: migrate database to AWS&lt;/p&gt;

&lt;p&gt;SMS: Server migration Service: migrate Virtual Machines to AWS&lt;/p&gt;

&lt;h2&gt;
  
  
  ANALYTICS
&lt;/h2&gt;

&lt;p&gt;Athena: Run SQL queries on S3. CSV or XML. Turn flat files into a database&lt;/p&gt;

&lt;p&gt;Elastic Map Reduce (EMR): Big data processing. Large amounts of data. Uses Hadoop/Apache Spark in the back&lt;/p&gt;

&lt;h2&gt;
  
  
  SEARCH
&lt;/h2&gt;

&lt;p&gt;ElasticSearch: Uses Lucene. Open source&lt;/p&gt;

&lt;p&gt;Cloud Search: Fully Managed by AWS. Same technology as ES&lt;/p&gt;

&lt;p&gt;Kinesis: Stream and analyze streaming data. Big data&lt;/p&gt;

&lt;p&gt;Sentiment Analysis: Social media streams    &lt;/p&gt;

&lt;p&gt;Data Pipeline: move data from place to place. Move S3 to DynamoDB, or viceversa&lt;/p&gt;

&lt;p&gt;Quicksight: Analyze data, create dashboards, etc.&lt;/p&gt;

&lt;h2&gt;
  
  
  SECURITY AND IDENTITY
&lt;/h2&gt;

&lt;p&gt;IAM: Authenticate, permissions, etc.&lt;/p&gt;

&lt;p&gt;Inspector: Inspects virtual machines (status, etc)&lt;/p&gt;

&lt;p&gt;Certificate Manager: Manage SSL Certificates&lt;/p&gt;

&lt;p&gt;Directory Service: Active Directory Management&lt;/p&gt;

&lt;p&gt;WAF (Web Application Firewall): App net protection against DDOS, hacking. On top of the Network firewall&lt;/p&gt;

&lt;p&gt;Artifacts: Compliance documents&lt;/p&gt;

&lt;h2&gt;
  
  
  MANAGEMENT TOOLS
&lt;/h2&gt;

&lt;p&gt;Cloudwatch: Get information on VM, CPU, memory, etc. Stores different kind of logs&lt;/p&gt;

&lt;p&gt;Cloud Formation: Turn servers into code. Templates to build entire networks/servers&lt;/p&gt;

&lt;p&gt;Cloud Trail: Audit AWS Resources &lt;/p&gt;

&lt;p&gt;OpWorks: configuration management service, provides instances of Chef and Puppet. Automated deploys&lt;/p&gt;

&lt;p&gt;Service Catalog: Manage images (vm) and authorized servers in the org&lt;/p&gt;

&lt;p&gt;Trusted Advisor: Automated tips and performance optimizations. Automated scan of the environment for problems/security issues&lt;/p&gt;

&lt;h2&gt;
  
  
  APPLICATION SERVICES
&lt;/h2&gt;

&lt;p&gt;Step functions: visualize steps inside app/microservices. Serverless orchestration&lt;/p&gt;

&lt;p&gt;SWF: simple workflow service. Facilitate task both automated (jobs) and human (IE, pick something from a storage)&lt;/p&gt;

&lt;p&gt;API Gateway: Door to AWS services, backend services or your own code, in AWS. Can call Lambda functions, for example&lt;/p&gt;

&lt;p&gt;AppStream: Stream desktop apps to users&lt;/p&gt;

&lt;p&gt;Elastic Transcoder: video tools. Change format, resize, etc.&lt;/p&gt;

&lt;h2&gt;
  
  
  DEVELOPER TOOLS
&lt;/h2&gt;

&lt;p&gt;Code commit: Store code in the cloud. GIT&lt;/p&gt;

&lt;p&gt;Code build: Pay by minute, compile code in different environments&lt;/p&gt;

&lt;p&gt;Code deploy: Deploy code to EC2. Automatic building, etc.&lt;/p&gt;

&lt;p&gt;Code Pipeline: Keep track of code differences between environments, build pipeline (IE, trigger compile when committing code, run unit test, etc)&lt;/p&gt;

&lt;h2&gt;
  
  
  ARTIFICIAL INTELLIGENCE
&lt;/h2&gt;

&lt;p&gt;Polly: Service that transform text into Mp3&lt;/p&gt;

&lt;p&gt;Machine Learning: dataset, analyze data, predict&lt;/p&gt;

&lt;p&gt;Rekognition: Image recognition and processing&lt;/p&gt;

&lt;h2&gt;
  
  
  MESSAGING
&lt;/h2&gt;

&lt;p&gt;SNS: Simple notification service - alert system, via SMS, HTTP endpoint, email, etc.&lt;/p&gt;

&lt;p&gt;SQS: Simple Queue Service: message queue&lt;/p&gt;

&lt;p&gt;SES: Simple Email Service: SMTP&lt;/p&gt;

&lt;p&gt;API Gateway&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;caches responses from endpoint for set period of time (TTL)&lt;/li&gt;
&lt;li&gt;cache can be encrypted, it can be flushed and you can define size&lt;/li&gt;
&lt;li&gt;you can't regenerate the cache&lt;/li&gt;
&lt;li&gt;low cost, efficient&lt;/li&gt;
&lt;li&gt;scales effortlessly&lt;/li&gt;
&lt;li&gt;throttle requests to prevent attacks&lt;/li&gt;
&lt;li&gt;connect to CloudWatch to log requests and troubleshoot&lt;/li&gt;
&lt;li&gt;contains default DDOS protection&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Cross-Origin Resource Sharing (CORS)&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;server on other end can relax same-origin policy&lt;/li&gt;
&lt;li&gt;mechanism that allows restricted resources on web page to be from another domain&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  AWS CLI
&lt;/h2&gt;

&lt;p&gt;Command line to configure AWS&lt;/p&gt;

&lt;p&gt;You don't use your console user and password, you use the security and access key that was provided when you added the IAM User&lt;br&gt;
If you lose these values, you have to re-generate them; they're only shown after the user is added. AWS Config&lt;/p&gt;

&lt;p&gt;Detailed view of configuration resources&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Evaluate config with desired settings&lt;/li&gt;
&lt;li&gt;Get snapshots of current config in AWS account&lt;/li&gt;
&lt;li&gt;Retrieve historical configurations&lt;/li&gt;
&lt;li&gt;Receive a notification when resources are created, modified or deleted&lt;/li&gt;
&lt;li&gt;View relationships between resources Compute
EC2 - Elastic Compute Cloud, virtual/dedicate machines in AWS
Lightsail - Virtual Private Server, dumb server with fixed-IP with SSH/RDP access not fully utilizing AWS services
Elastic Container Service - Run and manage docker containers at scale
Lambda - Code uploaded to cloud, you control when it executes (no worrying about machines underneath)
Batch - Used for batch computing in the cloud
Elastic Beanstalk - Developers can upload apps and AWS auto-configures&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Storage
&lt;/h2&gt;

&lt;p&gt;S3 - Simple Storage Service, object-based storage. Files are uploaded into AWS Buckets&lt;br&gt;
EFS - Elastic File System, network-attached storage (NFS)&lt;br&gt;
Glacier - Data archival, used for data that isn't accessed very often and is cheap&lt;br&gt;
Snowball - Way to bring in large amounts of data to AWS, physical snowball is sent to data center and is sent to AWS where they import for you&lt;br&gt;
Storage Gateway - Virtual Appliances, virtual machines you install in your office and data is replicated back to Amazon&lt;/p&gt;

&lt;h2&gt;
  
  
  Databases
&lt;/h2&gt;

&lt;p&gt;RDS - Relational Database Service (MySQL, MSSQL, PostgreSQL, Aurora, Oracle, etc.)&lt;br&gt;
DynamoDB - Non-relational Databases (Redis, etc.)&lt;br&gt;
Elasticache - Caching commonly-queried things from database server (top 10 products, etc.)&lt;br&gt;
Red Shift - Data warehousing/business intelligence, complex queries (doing P&amp;amp;L analysis, time-intensive queries, etc.)&lt;/p&gt;

&lt;h2&gt;
  
  
  Migration
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;AWS Migration Hub - Tracking service to track application as they are migrated to AWS Application Discovery Service (ADS)- Automated tools 
that detects application type and related dependencies Database

&lt;ul&gt;
&lt;li&gt;Migration Service (DMS) - Easy way to migrate database from on-premise to AWS Server Migration Service - Easy way to migrate virtual/physical server into AWS Cloud Snowball - Similar to Storage, this helps to migrate large amounts of data into the cloud&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;h2&gt;
  
  
  Networking &amp;amp; Content Delivery
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;VPC (Virtual Private Cloud) - Basically a virtual data center (configure firewalls, AZ's, address ranges, network ACL's, root tables, etc.). &lt;em&gt;NEED TO UNDERSTAND THIS INSIDE AND OUT TO PASS!&lt;/em&gt;

&lt;ul&gt;
&lt;li&gt;CloudFront - Content Delivery Network by Amazon, delivers media assets to users (video files, audio files, etc) by storing close to users&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;Route53 - Amazon's DNS Service

&lt;ul&gt;
&lt;li&gt;API Gateway - Amazon's way to create API's for services to talk to&lt;/li&gt;
&lt;li&gt;Direct Connect - Amazon's way of running dedicated line from your business into AWS&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;h2&gt;
  
  
  Developer Tools
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;CodeStar - Project managing of code for developers&lt;/li&gt;
&lt;li&gt;CodeCommit - Place to store code (source control), private git repository&lt;/li&gt;
&lt;li&gt;CodeBuild - Compiles, tests code and build packages ready for deployment&lt;/li&gt;
&lt;li&gt;CodeDeploy - Deployment services that will deploy applications to EC2, Lambda, on-premise&lt;/li&gt;
&lt;li&gt;CodePipeline - Continuous Delivery to Model/Visualize/Automate steps for software release&lt;/li&gt;
&lt;li&gt;X-Ray - Used to debug/analyze serverless applications by showing traces&lt;/li&gt;
&lt;li&gt;Cloud9 - IDE Environment to develop code inside AWS console&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Management Tools
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;CloudWatch - Monitoring service, need to know for SysOps Admin exam&lt;/li&gt;
&lt;li&gt;CloudFormation - Automated way to deploy servers/services in AWS, agnosticizing aspects to make deployments faster everywhere&lt;/li&gt;
&lt;li&gt;CloudTrail - Everything that happens in AWS is recorded and logged in CloudTrail and makes for easy tracking of things happening in your environment&lt;/li&gt;
&lt;li&gt;Config - Monitors configuration of entire AWS environment and keeps snapshots of entire AWS environment (visualize AWS environment)&lt;/li&gt;
&lt;li&gt;OpsWorks - Similar to elastic beanstalk, used to automate configuration of environments (covered in the SysOps Admin test)&lt;/li&gt;
&lt;li&gt;Service Catalog - Way of managing a catalog of IT-approved services in AWS. Typically used for governance/compliance in big organizations&lt;/li&gt;
&lt;li&gt;Systems Manager - Managing AWS resources (EC2 patch maintenance, for example). Resources can be grouped by department or application&lt;/li&gt;
&lt;li&gt;Trusted Advisor - Will give advice across many different disciplines. Make sure to know the difference between - Trusted Advisor and Inspector
Managed Services - Amazon will take care of EC2, auto-scaling&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Media Services
&lt;/h2&gt;

&lt;p&gt;&lt;em&gt;Elastic Transcoder&lt;/em&gt; &lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;MediaConvert - File-based video transcoding with broadcast-grade features&lt;/li&gt;
&lt;li&gt;MediaLive - Broadcast-grade live video processing services (video streams)&lt;/li&gt;
&lt;li&gt;MediaPackage - Prepares and protects videos for delivery over the internet&lt;/li&gt;
&lt;li&gt;MediaStore - Place to store media (storage optimized for media)&lt;/li&gt;
&lt;li&gt;MediaTailor - Allows to do targeted-advertising into video streams without sacrificing broadcasting quality&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Machine Learning
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;SageMaker - Makes it easy for developers to use deep learning when coding for their environments
Comprehend - Sentiment analysis around data&lt;/li&gt;
&lt;li&gt;DeepLens - Artificially-aware camera (can understand what it's looking at--localized detection, not running in cloud--physical hardware)&lt;/li&gt;
&lt;li&gt;Lex - Powers Amazon Alexa service, artificial intelligence
Machine Learning - Different to deep learning, entry-level. AWS will analyze data sets and predict outcomes&lt;/li&gt;
&lt;li&gt;Polly - Takes text and turns it into speech&lt;/li&gt;
&lt;li&gt;Rekognition - Upload a file and it will do file analysis (upload pic of dog on beach, it will tell you "dog", "beach", etc.)&lt;/li&gt;
&lt;li&gt;Amazon Translate - Machine translation service, like google translate but from Amazon&lt;/li&gt;
&lt;li&gt;Amazon Transcribe - Used for those that are hard of hearing--takes audio and creates text&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Analytics
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;Athena - Run SQL queries against items in S3 buckets (serverless)&lt;/li&gt;
&lt;li&gt;EMR - Elastic Map Reduce, used for processing large amounts of data
CloudSearch - Search service for AWS
ElasticSearch Service - Elastic Search service for AWS&lt;/li&gt;
&lt;li&gt;Kinesis - Way of ingesting large amounts of data into AWS (i.e. social media feeds for specific hashtag)
Kinesis Video Streams - Allows you to ingest large amounts of data on people streaming your media
QuickSight - BI tool, significantly cheaper than competitors&lt;/li&gt;
&lt;li&gt;Data Pipeline - Way of moving data between different AWS services&lt;/li&gt;
&lt;li&gt;Glue - Used for ETL (extract, transform, load), glue is optimized to achieve this&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Security &amp;amp; Identity &amp;amp; Compliance
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;IAM - Identity Access Management
Cognito - Way of doing device authentication (2-factor auth)
GuardDuty - Monitors for malicious activity on AWS account&lt;/li&gt;
&lt;li&gt;Inspector - Agent installed on EC2 instances, run tests against it to check for vulnerabilities--these can be scheduled
Macie - Scan S3 buckets for any personally identifiable information (PII) and alert you&lt;/li&gt;
&lt;li&gt;Certificate Manager - SSL certificates for free if registered through Route53&lt;/li&gt;
&lt;li&gt;CloudHSM - Cloud Hardware Security Module, Dedicated hardware used to store your private/public keys or other keys, can also use keys to encrypt objects on AWS&lt;/li&gt;
&lt;li&gt;Directory Service - Incorporate Microsoft Active Directory with AWS&lt;/li&gt;
&lt;li&gt;WAF - Web Application Firewall (7-layer firewall), monitoring application layer&lt;/li&gt;
&lt;li&gt;Shield - DDoS Mitigation
Artifact - Used for audit and compliance, download audit and compliance reports&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Mobile Services
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;Mobile Hub - Management console for mobile services&lt;/li&gt;
&lt;li&gt;Pinpoint - Targeted push notifications for increased mobile engagement&lt;/li&gt;
&lt;li&gt;AWS AppSync - Automatically updates data in web/mobile applications and will update offline users when they reconnect&lt;/li&gt;
&lt;li&gt;Device Farm - Testing apps on real, live devices&lt;/li&gt;
&lt;li&gt;Mobile Analytics - Analytics service for mobile devices&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  AR/VR
&lt;/h2&gt;

&lt;p&gt;Sumerian - Used for AR/VR 3D app design&lt;/p&gt;

&lt;h2&gt;
  
  
  Application Integrations
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;Step Functions - Way to manage lambda functions and steps to go through it&lt;/li&gt;
&lt;li&gt;Amazon MQ - Message queues (like RabbitMQ)&lt;/li&gt;
&lt;li&gt;SNS - Notification service when triggers are hit&lt;/li&gt;
&lt;li&gt;SQS - Way to decouple infrastructure, take messages in, and allow EC2 instances to poll data&lt;/li&gt;
&lt;li&gt;SWF - Create a workflow to be modeled after a process that you have&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Customer Engagement
&lt;/h2&gt;

&lt;p&gt;Amazon Connect - Contact Center as a Service (CCaaS)&lt;br&gt;
***Simple Email Service - Easy way to send large amounts of emails&lt;/p&gt;

&lt;h2&gt;
  
  
  Business Productivity
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;Alexa For Business - use to dial into rooms, inform IT of problem--Alexa in the workplace&lt;/li&gt;
&lt;li&gt;Amazon Chime - Video conferencing by Amazon&lt;/li&gt;
&lt;li&gt;Work Docs - Like Dropbox for AWS&lt;/li&gt;
&lt;li&gt;WorkMail - Like O365/Gmail for Amazon&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Desktop &amp;amp; App Streaming
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;Workspaces - VDI (Virtual Desktop) that can be accessed in the cloud&lt;/li&gt;
&lt;li&gt;AppStream 2.0 - Stream application live to device (like Citrix)&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Internet of Things
&lt;/h2&gt;

&lt;p&gt;IoT - Can have devices sending back information&lt;br&gt;
IoT Device Management - Used to manage AWS IoT devices&lt;br&gt;
Amazon FreeRTOS - Realtime OS by Amazon&lt;br&gt;
Greengrass - Software to run local compute services in a secure way&lt;/p&gt;

&lt;h2&gt;
  
  
  Game Development
&lt;/h2&gt;

&lt;p&gt;GameLift - Service to help develop game services in AWS&lt;/p&gt;

&lt;p&gt;Shared Responsibility:&lt;/p&gt;

&lt;p&gt;AWS is responsible for:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Base hypervisor&lt;/li&gt;
&lt;li&gt;Zones&lt;/li&gt;
&lt;li&gt;Network&lt;/li&gt;
&lt;li&gt;Region&lt;/li&gt;
&lt;li&gt;Operating system and patches in RDS databases&lt;/li&gt;
&lt;li&gt;etc &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;YOU are responsible for:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Encryption&lt;/li&gt;
&lt;li&gt;Operating system in your EC2 instances&lt;/li&gt;
&lt;li&gt;Firewalls&lt;/li&gt;
&lt;li&gt;Customer data&lt;/li&gt;
&lt;li&gt;etc.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  AWS Trusted Advisor
&lt;/h2&gt;

&lt;p&gt;Application that learns from existing AWS customers&lt;br&gt;
Inspects AWS environment and makes recommendations for: &lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Saving money&lt;/li&gt;
&lt;li&gt;Improve performance&lt;/li&gt;
&lt;li&gt;Close security gaps&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Elastic Beanstalk&lt;/p&gt;

&lt;p&gt;Deploy, monitor and scale app quickly&lt;br&gt;
Highly abstract focus towards infrastructure&lt;br&gt;
Simplify infra management, uses GUI to configure things. Good for people with little AWS experience&lt;br&gt;
&lt;em&gt;Uses CloudFormation in the background.&lt;/em&gt; &lt;br&gt;
Can be used for Workers/Jobs&lt;/p&gt;

&lt;p&gt;Pre-configured Instance support:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;NodeJS, Python, PHP, Ruby, Tomcat, .NET (Win IIS), Java, Go, Packer&lt;/li&gt;
&lt;li&gt;Docker images&lt;/li&gt;
&lt;li&gt;Generic docker&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Can have multiple versions of your app&lt;br&gt;
Can be split into tiers (Web / App / DB tier / Front end / Backend, etc)&lt;br&gt;
You can update the configs after created&lt;/p&gt;

&lt;p&gt;Updates can be&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;1 instance at a time&lt;/li&gt;
&lt;li&gt;% of instances&lt;/li&gt;
&lt;li&gt;Immutable (launches all apps from 0 again)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;If Beanstalk creates your RDS, will be deleted if/when you delete the EBS instance &lt;/p&gt;

&lt;p&gt;Business Benefits of Cloud&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Almost zero upfront infrastructure investment&lt;/li&gt;
&lt;li&gt;Just-in-time infrastructure&lt;/li&gt;
&lt;li&gt;More efficient resource utilization&lt;/li&gt;
&lt;li&gt;Usage-based costing&lt;/li&gt;
&lt;li&gt;Reduced time to market&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Technical Benefits of Cloud&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Automation (Scriptable Infrastructure)&lt;/li&gt;
&lt;li&gt;Auto-scaling&lt;/li&gt;
&lt;li&gt;Proactive Scaling&lt;/li&gt;
&lt;li&gt;More efficient development lifecycle&lt;/li&gt;
&lt;li&gt;Improved testability&lt;/li&gt;
&lt;li&gt;Disaster recovery and Business Continuity&lt;/li&gt;
&lt;li&gt;"Overflow" traffic to the CloudCloudformation&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Allows to transform hardware into code&lt;br&gt;
Easy way to create/manage AWS resources&lt;br&gt;
Can apply versioning to AWS infrastructure (like code)&lt;/p&gt;

&lt;p&gt;Template --&amp;gt; Diagram&lt;/p&gt;

&lt;p&gt;Stack --&amp;gt; Result of the diagram&lt;/p&gt;

&lt;p&gt;Format: JSON or YAML&lt;/p&gt;

&lt;p&gt;Template Elements:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Required: list of AWS resources&lt;/li&gt;
&lt;li&gt;Optional:

&lt;ul&gt;
&lt;li&gt;Version, file format&lt;/li&gt;
&lt;li&gt;Template parameters (up to 60)
&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;Output

&lt;ul&gt;
&lt;li&gt;Public IP, ELB addresses (up to 60)&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;Naming&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;You can assign local names, and they are used partially when creating resources. &lt;/li&gt;
&lt;li&gt;Names are not fixed/enforced to avoid conflicts. Some exceptions exists (IE bucket names).&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;You can install software with a set of bootstrapping scripts.&lt;/p&gt;

&lt;p&gt;Includes integrations with Chef and Puppet&lt;/p&gt;

&lt;p&gt;Supports tagging , EBS volumes are automatically tagged&lt;/p&gt;

&lt;p&gt;Once provisioned, you have control of the resources&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Automatic rollback if error is ON by default (everything is deleted if an error occurs). Keep in mind you are charged for errors, but usage of &lt;/li&gt;
&lt;li&gt;CloudFormation is free&lt;/li&gt;
&lt;li&gt;Stacks can wait for app to be provisioned using WaitCondition&lt;/li&gt;
&lt;li&gt;Route53 is supported&lt;/li&gt;
&lt;li&gt;IAM Role creation is also supported&lt;/li&gt;
&lt;li&gt;Can define deletion policies for resources, when you delete the stack, resources are not deleted&lt;/li&gt;
&lt;li&gt;200 stacks max, can request more &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;If you want to hide something from Cloudtrail/Cloudwatch, mark the parameter with &lt;code&gt;NOECHO&lt;/code&gt; &lt;/p&gt;

&lt;p&gt;&lt;em&gt;Difference with Elastic Beanstalk?&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;CloudFormation and Elastic Beanstalk complement each other
Beanstalk deploys and runs app in the cloud, integrated with dev tools, manage life cycle of apps
CloudFormation is a mechanism to provision AWS resources, template to build the entire infrastructure, including Beanstalk apps&lt;/li&gt;
&lt;/ul&gt;

&lt;h1&gt;
  
  
  Content Delivery Network (CDN)
&lt;/h1&gt;

&lt;p&gt;Edge Location - Location where content will be cached; separate to an AWS Region/AZ&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Origin - Origin of all the files that the CDN will distribute. This can be an S3 Bucket, EC2 Instance, Elastic Load Balancer, or Route53, or not with AWS&lt;/li&gt;
&lt;li&gt;Web Distribution - Typically used for websites&lt;/li&gt;
&lt;li&gt;RTMP - Used for media streaming (adobe flash)&lt;/li&gt;
&lt;li&gt;Edge Locations are not just read only, you can write to them, too&lt;/li&gt;
&lt;li&gt;Objects are cached for life of TTL
-- Default: 24 hours
-- Max: 365 days&lt;/li&gt;
&lt;li&gt;You can clear objects from the Cloudfront, but you will be charged&lt;/li&gt;
&lt;li&gt;Restrict Viewer access
-- Signed URLs
-- Signed Cookies&lt;/li&gt;
&lt;li&gt;Geo restriction&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Cloudtrail is used to log all the API calls made internally on AWS, mostly for audit&lt;/p&gt;

&lt;p&gt;Since all the settings you change via the console are actually API calls made to the internal AWS API, if you enable Cloudtrail you can get all the information about everything that was done via the console or via specific API calls.&lt;/p&gt;

&lt;p&gt;You can turn on a trail across all regions for your AWS account. Cloudtrail will deliver log files from all regions to a S3 bucket and an optional Cloudwatch log group you specify. &lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Standard Monitoring = 5 minutes&lt;/li&gt;
&lt;li&gt;Detailed Monitoring = 1 Minute&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;You have to pay if you want &lt;em&gt;Detailed Monitoring&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;In Cloudwatch you can&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Create dashboards&lt;/li&gt;
&lt;li&gt;Create alarms&lt;/li&gt;
&lt;li&gt;Create events (state changes for AWS resources for example)&lt;/li&gt;
&lt;li&gt;Logs (aggregate, monitor and store logs)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Aurora&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;MySQL-compatible&lt;/li&gt;
&lt;li&gt;combines speed and availability of high-end commercial databases&lt;/li&gt;
&lt;li&gt;has simplicity and cost-effectiveness of open source databases&lt;/li&gt;
&lt;li&gt;five times better performance than MySQL at 1/10th price of commercial databases&lt;/li&gt;
&lt;li&gt;storage starts with 10GB, scales in 10GB increments up to 64TB&lt;/li&gt;
&lt;li&gt;compute scales up to 32 vCPU's and 244GB memory&lt;/li&gt;
&lt;li&gt;2 copies of data in each AZ, minimum of 3 AZ's&lt;/li&gt;
&lt;li&gt;designed to transparently handle loss of 2 copies without affecting DB write availability&lt;/li&gt;
&lt;li&gt;designed to transparently handle loss of 3 copies without affecting read availability&lt;/li&gt;
&lt;li&gt;self-healing; data blocks/disks are continuously scanned for errors and repaired automatically&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;Aurora Replica Features&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Aurora Replicas

&lt;ul&gt;
&lt;li&gt;Aurora Replicas: 15&lt;/li&gt;
&lt;li&gt;MySQL Read Replicas: 5&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;DynamoDB&lt;/li&gt;

&lt;li&gt;NoSQL database for consistent, single-digit millisecond latency at any scale&lt;/li&gt;

&lt;li&gt;Stored on SSD storage&lt;/li&gt;

&lt;li&gt;Spread across 3 geographically distinct data centers&lt;/li&gt;

&lt;li&gt;Eventually Consistent Reads

&lt;ul&gt;
&lt;li&gt;Consistency across all copies of data is usually reached within a second. Repeating read after short time will return updated data (best read performance)&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;Strongly Consistent Reads

&lt;ul&gt;
&lt;li&gt;Returns a result that reflects all writes that received a successful response prior to the read&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;Autoscaling supported (% target utilization, min/max)&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;DynamoDB Pricing&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Provisioned Throughput Capacity

&lt;ul&gt;
&lt;li&gt;Write Throughput $0.0065 per hour for every 10 units&lt;/li&gt;
&lt;li&gt;Read Throughput $0.0065 per hour for every 50 units&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;Storage Costs

&lt;ul&gt;
&lt;li&gt;First 25 GB --&amp;gt; Free&lt;/li&gt;
&lt;li&gt;$0.25GB/month&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;Free tier: 25 units read / 25 units write&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;DynamoDB Streams&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Capture changes to DynamoDB for 24 hours. Audit trail like (Add, change (before and after), delete). Use LAMBDA if you want to store the data more than 24 hrs&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Max size of each item with attributes: &lt;code&gt;400KB&lt;/code&gt;&lt;br&gt;
BatchWriteItem: &lt;code&gt;25 items, 16MB&lt;/code&gt;&lt;br&gt;
BatchGetItem: &lt;code&gt;100 items, 16MB&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;Scan: &lt;br&gt;
    Eventual or consistency, add parameter ConsistentRead&lt;br&gt;
    Iterator: returns 1MB and LastEvaluatedKey (to paginate)&lt;/p&gt;

&lt;p&gt;Data types:&lt;br&gt;
    Number, string, binary, boolean, NULL&lt;br&gt;
    JSON: stored as document, can create keys and filter by attribute, can update a sub-element, can use document SDK as wrapper (JS)&lt;/p&gt;

&lt;p&gt;Indexes:&lt;br&gt;
Global Secondary Index:&lt;br&gt;
    Can add up to &lt;code&gt;5&lt;/code&gt; per table&lt;br&gt;
Local Secondary Index &lt;br&gt;
    Can add up to &lt;code&gt;5&lt;/code&gt; per table, AT CREATION (can't add them later)&lt;br&gt;
    &lt;code&gt;10GB&lt;/code&gt; PER PARTITION&lt;/p&gt;

&lt;h1&gt;
  
  
  Security
&lt;/h1&gt;

&lt;p&gt;Fine granular access control allows users in IAM to access/deny information (table, items or even attributes)&lt;/p&gt;

&lt;p&gt;Reserved capacity can be bought at discounted price. Limited to a single region.&lt;/p&gt;

&lt;p&gt;Triggers are supported (uses DynamoDB w/Lambda)&lt;/p&gt;

&lt;p&gt;Can specify TTL on tables. Needs to have a timestamp&lt;/p&gt;

&lt;p&gt;DAX: In memory cache (in SDK Node.js &amp;amp; Java)&lt;/p&gt;

&lt;p&gt;ElastiCache&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Memcached

&lt;ul&gt;
&lt;li&gt;No Multi-AZ support&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;Redis

&lt;ul&gt;
&lt;li&gt;Multi-AZ support&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;When asked which service to use to alleviate stress/load on database:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Elasticache is good choice if database is read heavy and not prone to frequent changing&lt;/li&gt;
&lt;li&gt;Redshift is good if reason database is stressed is because management keeps running OLAP transactions on it&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Automated Backups&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Allow you to recover database to any point in time within retention period (1-35 days)&lt;/li&gt;
&lt;li&gt;Take full, daily snapshot&lt;/li&gt;
&lt;li&gt;Store transaction logs&lt;/li&gt;
&lt;li&gt;Enabled by default&lt;/li&gt;
&lt;li&gt;Stored in S3, free equal to size of database&lt;/li&gt;
&lt;li&gt;Deleted when RDS instance is deleted&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;Snapshots&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Database snapshots are done manually (stored even after RDS instance is deleted)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;Restoring Backups&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;When using either restore option, restored version of database will be a new RDS instance with new DNS endpoint&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;Encryption&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Done using AWS Key Management Service (KMS)&lt;/li&gt;
&lt;li&gt;Encrypting existing RDS is not currently supported&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;Multi-AZ RDS&lt;/em&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Used for Disaster Recovery (DR) only&lt;/li&gt;
&lt;li&gt;Availability

&lt;ul&gt;
&lt;li&gt;SQL Server&lt;/li&gt;
&lt;li&gt;Oracle&lt;/li&gt;
&lt;li&gt;MySQL Server&lt;/li&gt;
&lt;li&gt;PostgreSQL&lt;/li&gt;
&lt;li&gt;MariaDB&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Read Replicas&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Used for scaling, not DR&lt;/li&gt;
&lt;li&gt;Must have automatic backups turned on to deploy a Read Replicas&lt;/li&gt;
&lt;li&gt;You can have up to 5 Read Replicas of any database&lt;/li&gt;
&lt;li&gt;Allow you to have a read-only copy of your database&lt;/li&gt;
&lt;li&gt;Achieved using asynchronous replication&lt;/li&gt;
&lt;li&gt;Used for performance improvements, read-heavy database workloads&lt;/li&gt;
&lt;li&gt;Each read replica will have its own DNS end point&lt;/li&gt;
&lt;li&gt;You can have read replicas that have Multi-AZ&lt;/li&gt;
&lt;li&gt;You can create read replicas of Multi-AZ source databases&lt;/li&gt;
&lt;li&gt;Read Replicas can be promoted to be their own databases (breaks replication)&lt;/li&gt;
&lt;li&gt;You can have a read replica in another Region&lt;/li&gt;
&lt;li&gt;Redshift&lt;/li&gt;
&lt;li&gt;Datawarehousing&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Column Data. Aggregation&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Single Node (160GB)&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Multi-Node&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Leader Node, manages client connections and receives queries&lt;/li&gt;
&lt;li&gt;Compute Node, store data and perform queries and computations&lt;/li&gt;
&lt;li&gt;Up to 128 Compute Nodes&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Columnar Data Storage&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Massively Parallel Processing (MPP) - Automatic distribution of data and query loads across all Nodes&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Currently only available in one AZ&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Can restore snapshots to new AZ in event of outage&lt;/p&gt;&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Costs&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Computer Node Hours&lt;/li&gt;
&lt;li&gt;Backup&lt;/li&gt;
&lt;li&gt;Data Transfer (within a VPC, not outside of)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Encryption&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Encrypted in transit using SSL&lt;/li&gt;
&lt;li&gt;Encrypted at rest using AES-256&lt;/li&gt;
&lt;li&gt;Redshift takes care of key management&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Availability:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;SINGLE AZ&lt;/li&gt;
&lt;li&gt;Can restore snapshots to other AZ&lt;/li&gt;
&lt;li&gt;Enable Cross-Region snapshot for recovery&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;VPC:&lt;/strong&gt;&lt;br&gt;
Turn on Enhanced VPC routing for VPC endpoints (So data doesn't leave your own VPC)&lt;/p&gt;

&lt;p&gt;AWS Database Types&lt;br&gt;
Maximum size: 16 TB. If larger, consider Redshift&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;RDS - OLTP (Online Transaction Processing)&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;SQL Server&lt;/li&gt;
&lt;li&gt;Oracle&lt;/li&gt;
&lt;li&gt;MySQL&lt;/li&gt;
&lt;li&gt;PostgreSQL&lt;/li&gt;
&lt;li&gt;Amazon Aurora&lt;/li&gt;
&lt;li&gt;MariaDB

&lt;ul&gt;
&lt;li&gt;DynamoDB&lt;/li&gt;
&lt;li&gt;RedShift OLAP (Online Analytics Processing, Datawarehousing)&lt;/li&gt;
&lt;li&gt;Elasticache&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Non-Relational Database Structure&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Database

&lt;ul&gt;
&lt;li&gt;Collection (table)

&lt;ul&gt;
&lt;li&gt;Document (row)&lt;/li&gt;
&lt;li&gt;Key/Value Pairs (fields)&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Data Warehousing&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Used for Business Intelligence (Cognos, Jaspersoft, etc.)&lt;/li&gt;
&lt;li&gt;OLTP Vs. OLAP
--- OLTP (Online Transaction Processing)
--- --- Order number 2120121
--- --- Pulls up a row of data (name, date, address, status)
--- OLAP (Online Analytics Processing, used for Datawarehousing)
--- --- Pull in large number of records
--- --- Uses different type of architecture for database and infrastructure&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Elasticache&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Web service that makes it easy to deploy, operate, and scale in-memory cache in the cloud&lt;/li&gt;
&lt;li&gt;Types

&lt;ul&gt;
&lt;li&gt;Memcached&lt;/li&gt;
&lt;li&gt;Redis&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;Summary&lt;br&gt;
Database Types&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;RDS (OLTP)

&lt;ul&gt;
&lt;li&gt;SQL&lt;/li&gt;
&lt;li&gt;MySQL&lt;/li&gt;
&lt;li&gt;PostgreSQL&lt;/li&gt;
&lt;li&gt;Oracle&lt;/li&gt;
&lt;li&gt;Aurora&lt;/li&gt;
&lt;li&gt;MariaDB&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;DynamoDB (NoSQL)&lt;/li&gt;

&lt;li&gt;RedShift (OLAP)&lt;/li&gt;

&lt;li&gt;Elasticache (in-memory)

&lt;ul&gt;
&lt;li&gt;Memcached&lt;/li&gt;
&lt;li&gt;Redis&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Multi-AZ&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Used for DR&lt;/li&gt;
&lt;li&gt;Not used for performance gains&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Read Replicas&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Used for scaling, performance gains&lt;/li&gt;
&lt;li&gt;You can have up to 5 Read Replicas&lt;/li&gt;
&lt;li&gt;You can have replicas of replicas (higher latency)&lt;/li&gt;
&lt;li&gt;Can be in a different region&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Aurora scaling&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;2 copies of data in each AZ, 3 AZ's minimum (total of 6 copies)&lt;/li&gt;
&lt;li&gt;Designed to handle losses transparently&lt;/li&gt;
&lt;li&gt;Self-healing storage&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Aurora Replicas&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Up to 15 Replicas&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;MySQL Replicas&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Up to 5 Replicas&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;DynamoDB vs RDS&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;DynamoDB offers "push button" scaling&lt;/li&gt;
&lt;li&gt;RDS requires bigger instance size or to add Read Replica&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;DynamoDB&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;stored on SSD storage&lt;/li&gt;
&lt;li&gt;spread across 3 geographically distinct data centers&lt;/li&gt;
&lt;li&gt;Types

&lt;ul&gt;
&lt;li&gt;eventually consistent reads (default)&lt;/li&gt;
&lt;li&gt;strongly consistent reads&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Redshift Configuration&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Single Node (160GB)&lt;/li&gt;
&lt;li&gt;Multi-Node

&lt;ul&gt;
&lt;li&gt;Leader Node (manages client connections)&lt;/li&gt;
&lt;li&gt;Compute Node (stores data, performs queries, up to 128 nodes)&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Elasticache&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Memcached

&lt;ul&gt;
&lt;li&gt;Multi-AZ NOT available&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;Redis

&lt;ul&gt;
&lt;li&gt;Multi-AZ available&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Two types of backup:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Automated -&amp;gt; retention period: between 1 and 35 days&lt;/li&gt;
&lt;li&gt;Database snapshots
Stored in S3. Not deleted when the RDS instance is deleted&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Encryption at rest: KMS. Can't enable encryption on existing DB. Must perform a copy and enable the encryption on the restored copy.&lt;br&gt;
SOA record stores:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;name of server that supplied data for the zone&lt;/li&gt;
&lt;li&gt;admin of the zone&lt;/li&gt;
&lt;li&gt;current version of the data file&lt;/li&gt;
&lt;li&gt;number of seconds a secondary name server should wait before checking for updates&lt;/li&gt;
&lt;li&gt;number of seconds a secondary name server should wait before retrying a failed zone transfer&lt;/li&gt;
&lt;li&gt;maximum number of seconds a secondary name server can use data before it must refresh or expire&lt;/li&gt;
&lt;li&gt;default number of seconds for the time-to-live (TTL) file on resource records&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;NS Records (Name Server Record)&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;used by top level domain server to direct traffic to the Content DNS server which contains authoritative DNS records&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;A Records (Address Record)&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;used to translate domain name to IP address&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;TTL Record (Time-To-Live Record)&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;The length that a record is cached on either the Resolving Server or the users local PC&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;CName Record (Canonical Name Record)&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;can be used to resolve one domain name to another&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Alias Record&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;works like CName record in that you can map one DNS name to another&lt;/li&gt;
&lt;li&gt;CName can't be used for naked domain names, can't have CName for violetfamily.com, it must be either A Record or Alias&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;EBS Vs Instance Store&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;All AMI's are categorized as either backed by Amazon EBS or backed by instance Store&lt;/li&gt;
&lt;li&gt;EBS Volumes:
--- The root device for an instance launched from the AMI is an Amazon EBS volume created from an Amazon EBS snapshot&lt;/li&gt;
&lt;li&gt;Instance Store Volumes:
--- The root device for an instance launched from the AMI is an instance store volume created from a template stored in Amazon S3
--- Sometimes called Ephemeral Storage
--- If host fails, you lose your data&lt;/li&gt;
&lt;li&gt;Only EBS backed instances can be stopped&lt;/li&gt;
&lt;li&gt;Both instances types can be rebooted&lt;/li&gt;
&lt;li&gt;By default, ROOT volumes will be deleted on termination, but with EBS volumes you can tell AWS to keep the root device volume&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Auto Scaling&lt;/p&gt;

&lt;p&gt;Launch configuration on Auto Scaling group -&amp;gt; Choose AMI&lt;br&gt;
Can't change the AMI ID, it's chosen on creation&lt;/p&gt;

&lt;p&gt;Grace period: time that takes an instance to warm up. Will starts the checks after this period.&lt;/p&gt;

&lt;p&gt;You can find load logs related to autoscaling in&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Cloudwatch (metrics)&lt;/li&gt;
&lt;li&gt;Access logs&lt;/li&gt;
&lt;li&gt;Request tracing&lt;/li&gt;
&lt;li&gt;Cloud trail logs&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;How to register a LB group?&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Instance Id&lt;/li&gt;
&lt;li&gt;IP Address of the instance&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;3 ways to scale the servers&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Manual Scaling&lt;/li&gt;
&lt;li&gt;Dynamic scaling
-- In Target Tracking Scaling, you select a metric and set a target value, and EC2 Auto Scaling sets the Cloudwatch Alarms to trigger the scaling based on the metric that you set (or as close as possible)
-- Step scaling allows you to "step up" the number of servers (IE, add 2, add another 2, add another 2, etc), depending on the alarm breach
-- Simple scaling increases the current capacity of the group based on a single scaling adjustment. If you can, use step scaling even if you have a single metric.&lt;/li&gt;
&lt;li&gt;Scheduled scaling
-- You can predict the load changes and how long you need it to run (IE, add 2 more servers between 9am and 12pm from Monday to Friday)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Volumes &amp;amp; Snapshots&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Volumes exist on EBS
--- Virtual Hard Disk&lt;/li&gt;
&lt;li&gt;Snapshots exist on S3&lt;/li&gt;
&lt;li&gt;Snapshots are point in time copies of Volumes&lt;/li&gt;
&lt;li&gt;Snapshots are incremental - only blocks that have changed since your last snapshot are saved&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Snapshots of Root Device Volumes&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Can create AMI's from Volumes and Snapshots&lt;/li&gt;
&lt;li&gt;Can change volume sizes on the fly, including size and storage type. If you change a Volume on the fly, you have to wait 6 hours to change it again. Can't change volume type of Magnetic Std HD&lt;/li&gt;
&lt;li&gt;Volumes MUST BE in the same AZ as the EC2 instance&lt;/li&gt;
&lt;li&gt;If you need to restore/move a EBS Volume to another AZ, you need to create a snapshot of the volume, and create a new volume based on that snapshot, in the other AZ&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Encryption&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;To encrypt root volume, you need to create an AMI image of your boot disk first, OR use a third party software to encrypt&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Volumes Vs Snapshots - Security&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Snaps of encrypted volumes are encrypted automatically&lt;/li&gt;
&lt;li&gt;Volumes restored from encrypted snaps are encrypted automatically&lt;/li&gt;
&lt;li&gt;You can share snapshots only if they are unencrypted
--- Snaps can be shared with other AWS accounts or made public&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Default option is to delete volume when instance is terminated. Can be turned off in EC2 settings&lt;/p&gt;

&lt;p&gt;EBS Volumes only scale up; can't shrink in size&lt;br&gt;
Elastic File System (EFS) - file storage service for EC2 instances.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Supports NFSv4&lt;/li&gt;
&lt;li&gt;only pay for storage used&lt;/li&gt;
&lt;li&gt;can scale up to petabytes&lt;/li&gt;
&lt;li&gt;supports thousands of concurrent NFS connections&lt;/li&gt;
&lt;li&gt;multiple EC2 can point to the same EFS&lt;/li&gt;
&lt;li&gt;data is stored across multiple AZ's within a region. &lt;/li&gt;
&lt;li&gt;read after write consistency&lt;/li&gt;
&lt;li&gt;can restrict permission to file level or directory level&lt;/li&gt;
&lt;li&gt;can't mount an EFS in multiples VPC; only one at a time&lt;/li&gt;
&lt;li&gt;uses port 2049 (NFS)
-- file system and VPC must be in the same REGION &lt;/li&gt;
&lt;li&gt;Two types:
-- General purpose: low latency
-- Max IO: higher latency, but useful for big data&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Application Load Balancers&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;best suited for load balancing http and https traffic&lt;/li&gt;
&lt;li&gt;operate at layer 7&lt;/li&gt;
&lt;li&gt;application-aware&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Network Load Balancers&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;best suited for load balancing TCP traffic&lt;/li&gt;
&lt;li&gt;operate at layer 4&lt;/li&gt;
&lt;li&gt;can handle millions of requests per second with low latency&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Classic Load Balancers&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;legacy Elastic Load Balancer&lt;/li&gt;
&lt;li&gt;load balance HTTP/https&lt;/li&gt;
&lt;li&gt;operates at layer 7&lt;/li&gt;
&lt;li&gt;can use strict layer 4 load balancing&lt;/li&gt;
&lt;li&gt;if application stops responding, ELB responds with 504&lt;/li&gt;
&lt;li&gt;X-Forwarded-For header can pass on users public IP address&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;&lt;em&gt;Is the load balancer not answering?&lt;/em&gt;&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Internet facing load balancer is attached to PRIVATE SUBNET (should be in the public one)&lt;/li&gt;
&lt;li&gt;Security group ACL does not allow traffic&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Placement Groups:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Only certain types of instances can be launched in placement group (compute, GPU, Memory, Storage)&lt;/li&gt;
&lt;li&gt;AWS recommends homogeneous instances&lt;/li&gt;
&lt;li&gt;Can't merge placement Groups&lt;/li&gt;
&lt;li&gt;Can't move existing instance into a placement group&lt;/li&gt;
&lt;li&gt;Can create AMI from instance and launch that into placement group&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Clustered Placement Group:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Grouping of instances within a single AZ. Recommended for applications that need low network latency, high network throughput, or both&lt;/li&gt;
&lt;li&gt;Can't spread multiple AZ&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Spread Placement Group:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Group of instances that are each placed on distinct underlying hardware. Recommended for applications that have small number of critical instances that should be kept separate from each other&lt;/li&gt;
&lt;li&gt;Can spread over multiple AZ&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Exam Notes&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Security Group updates are applied immediately&lt;/li&gt;
&lt;li&gt;Security Groups are stateful (adding inbound rule automatically adds outbound rule)&lt;/li&gt;
&lt;li&gt;All inbound traffic is blocked by default&lt;/li&gt;
&lt;li&gt;All outbound traffic is allowed&lt;/li&gt;
&lt;li&gt;Any number of EC2 instances within a security Group&lt;/li&gt;
&lt;li&gt;You can have multiple security groups attached to an instances&lt;/li&gt;
&lt;li&gt;Security Groups are STATEFUL and Network Access Control Lists are STATELESS&lt;/li&gt;
&lt;li&gt;Cannot block specific IP addresses using Security Groups&lt;/li&gt;
&lt;li&gt;You can specify allow rules but not deny rules&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;EC2 - web service that provides resizable compute capacity in the cloud&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;On Demand&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;for users that want low cost and flexibility&lt;/li&gt;
&lt;li&gt;applications with short term, spiky, unpredictable workloads&lt;/li&gt;
&lt;li&gt;initial testing on EC2&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Reserved Instances&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;apps with steady or predictable usage&lt;/li&gt;
&lt;li&gt;applications that require reserved capacity&lt;/li&gt;
&lt;li&gt;users can make up-front payments to reduce total cost&lt;/li&gt;
&lt;li&gt;Standard Reserved Instance
--- up to 75% off on-demand cost&lt;/li&gt;
&lt;li&gt;Convertible Reserved Instance
--- up to 54% off on-demand cost
--- capability to change attributes of Reserved Instance as long as exchange results in creation of Reserved Instances of equal or greater value&lt;/li&gt;
&lt;li&gt;Scheduled Reserved Instance
--- available to launch within time window you reserved
--- allows you to match capacity reservation to a predictable, recurring schedule&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Spot Instances&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;flexible start and end times&lt;/li&gt;
&lt;li&gt;only feasible at very low compute prices&lt;/li&gt;
&lt;li&gt;users with urgent need for large amounts of additional computing capacity&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Dedicated Hosts&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;useful for regulatory requirements that may not support multi-tenant virtualization&lt;/li&gt;
&lt;li&gt;useful for licensing that doesn't support cloud deployments&lt;/li&gt;
&lt;li&gt;can be purchased on-demand&lt;/li&gt;
&lt;li&gt;can be purchased as reservation for up to 70% off on-demand price&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;em&gt;Instance Types&lt;/em&gt;&lt;br&gt;
&lt;code&gt;F&lt;/code&gt; - FPGA (Field Programmable Gate Array)&lt;br&gt;
&lt;code&gt;I&lt;/code&gt; - IOPS&lt;br&gt;
&lt;code&gt;G&lt;/code&gt; - Graphics&lt;br&gt;
&lt;code&gt;H&lt;/code&gt; - High Disk Throughput&lt;br&gt;
&lt;code&gt;T&lt;/code&gt; - cheap general purpose (think T3 micro)&lt;br&gt;
&lt;code&gt;D&lt;/code&gt; - Density&lt;br&gt;
&lt;code&gt;R&lt;/code&gt; - RAM&lt;br&gt;
&lt;code&gt;M&lt;/code&gt; - Main choice for general purpose apps&lt;br&gt;
&lt;code&gt;C&lt;/code&gt; - Compute&lt;br&gt;
&lt;code&gt;P&lt;/code&gt; - Graphics (think Pics)&lt;br&gt;
&lt;code&gt;X&lt;/code&gt; - Extreme Memory&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;EBS - Elastic Block Storage&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Attach block storage to EC2 instances&lt;/li&gt;
&lt;li&gt;placed in a specific AZ, automatically replicated to protect you from single-component failure&lt;/li&gt;
&lt;li&gt;if windows/linux installed on disk, it's called the "root device volume"&lt;/li&gt;
&lt;li&gt;Can't mount 1 EBS volume on multiple EC2 instances. Use EFS instead&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;EBS Volume Types&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;General Purpose SSD (GP2)
--- General purpose, price/performance balance
--- 3 IOPS/GB up to 10,000 IOPS and bursts up to 3,000 IOPS for extended periods for volumes 3334GB+
--- Less than 500 MiB/s&lt;/li&gt;
&lt;li&gt;Provisioned IOPS SDD (IO1)
--- Designed for IO intensive applications or NoSQL databases
--- Used when needing more than 10,000 IOPS
--- Can provision up to 20,000 IOPS/Volume
--- More than 500 MiB/s&lt;/li&gt;
&lt;li&gt;Throughput Optimized HDD (ST1)
--- Big Data
--- Data warehouses
--- Log processing
--- Cannot be boot volume&lt;/li&gt;
&lt;li&gt;Cold HDD (SC1)
--- Lowest cost storage, infrequently accessed
--- Typically a file server&lt;/li&gt;
&lt;li&gt;Magnetic (Standard)
--- Lowest cost per gigabyte of all volume types that are bootable
--- start dev here and move up when you're ready&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Instance Metadata&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="http://169.254.169.254/latest/meta-data/" rel="noopener noreferrer"&gt;http://169.254.169.254/latest/meta-data/&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Status Checks:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;System Status Checks: underlying layer (TCP, etc, to see if the instance receives network packets)&lt;/li&gt;
&lt;li&gt;Instance Status Checks: software and network&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Termination protection is OFF by default. ECS (Elastic Container Service)&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;ECS: Elastic Container Service&lt;/li&gt;
&lt;li&gt;ECR: Elastic Container Registry &lt;/li&gt;
&lt;li&gt;Task definition: blueprint &lt;/li&gt;
&lt;li&gt;Service: Launches and maintains copies of tasks definitions&lt;/li&gt;
&lt;li&gt;Cluster: Where tasks runs. Set of containers running ECS Service&lt;/li&gt;
&lt;li&gt;Task: instances of a task definition&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;ECS Cluster&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Container instance    \        - Task
                    Service /- Task&lt;/li&gt;
&lt;li&gt;Container instance    /       - Task&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  10,000-foot Overview
&lt;/h2&gt;

&lt;p&gt;Know EC2 Pricing Models&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;On Demand
--- Pay by the second or hour&lt;/li&gt;
&lt;li&gt;Reserved
--- Reserve capacity, contracts are from 12-36 months&lt;/li&gt;
&lt;li&gt;Spot
--- Set a bid price and if spot price meets your bid it will be provisioned
--- Instances terminated when spot price goes out of range
--- won't be charged if AWS terminates instance, but you will be charged if you terminate it&lt;/li&gt;
&lt;li&gt;Dedicated Hosts
--- Used when licensing or multi-tenant is an issue&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know EC2 Instance Types&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;(&lt;code&gt;FIGHT DR MCPX&lt;/code&gt;)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know EBS&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Storage Types
--- SSD, General Purpose GP2 (up to 10,000 IOPS, less than 500 MiB/sec)
--- SSD, Provisioned IOPS IO1 (More than 10,000 IOPS, more than 500 MiB/sec)
--- HDD, Throughput Optimized ST1 (frequently accessed workloads)
--- HDD, Magnetic Standard (cheap, infrequently accessed storage)&lt;/li&gt;
&lt;li&gt;Cannot mount EBS Volume to multiple EC2 instances; use EFS instead&lt;/li&gt;
&lt;li&gt;Termination Protection is turned off by default, you must turn it on&lt;/li&gt;
&lt;li&gt;On EBS-backed instance, default action is for the root EBS volume to be deleted when the instance is terminated&lt;/li&gt;
&lt;li&gt;EBS Root volumes of your DEFAULT AMI's cannot be encrypted (but third party tools can be used to encrypt)&lt;/li&gt;
&lt;li&gt;EBS Volumes can also be copied and then encrypted at that time&lt;/li&gt;
&lt;li&gt;Additional volumes can be encrypted&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know Volumes Vs Snapshots&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Volumes exist on EBS, virtual hard disk in the cloud&lt;/li&gt;
&lt;li&gt;Snapshots exist on S3&lt;/li&gt;
&lt;li&gt;You can take a snapshot of a volume, the snapshot will be stored on S3&lt;/li&gt;
&lt;li&gt;Snapshots are point-in-time copies of volumes&lt;/li&gt;
&lt;li&gt;Snapshots are incremental&lt;/li&gt;
&lt;li&gt;First snapshot takes a while&lt;/li&gt;
&lt;li&gt;Security
--- Snapshots of encrypted volumes are encrypted automatically
--- Volumes restored from encrypted snapshots are encrypted automatically
--- Snapshots can be shared, but only if they are not encrypted&lt;/li&gt;
&lt;li&gt;Snapshots of ROOT Device Volumes
--- Stop instances before taking snapshot of ROOT volume&lt;/li&gt;
&lt;li&gt;EBS Vs. Instance Store (Ephemeral Storage)
--- Instance Store volumes cannot be stopped
--- If underlying host in Instance Store fails, you lose your data
--- EBS can be stopped
--- Both can be rebooted, you won't lose your data
--- Both will be deleted on termination, but EBS offers option to keep&lt;/li&gt;
&lt;li&gt;Snapshotting RAID Array
--- Freeze the file system
--- Unmount RAID Array
--- Shutdown EC2 Instance&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know Amazon Machine Images (AMI)&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Regional, but can be copied to other regions&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know CloudWatch (monitoring)&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Standard (5minutes)&lt;/li&gt;
&lt;li&gt;Detailed (1minute)&lt;/li&gt;
&lt;li&gt;CloudWatch is for performance monitoring&lt;/li&gt;
&lt;li&gt;Unlike CloudTrail, which is for auditing AWS&lt;/li&gt;
&lt;li&gt;Dashboards&lt;/li&gt;
&lt;li&gt;Alarms&lt;/li&gt;
&lt;li&gt;Events&lt;/li&gt;
&lt;li&gt;Logs&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know Roles&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;More secure than storing access key and secret access keys on instances&lt;/li&gt;
&lt;li&gt;Easier to Manage&lt;/li&gt;
&lt;li&gt;Can be assigned to EC2 instance AFTER provisioning&lt;/li&gt;
&lt;li&gt;Universal to region&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know Instance Meta-data&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Used to get information about an instance&lt;/li&gt;
&lt;li&gt;curl &lt;a href="http://169.254.169.254/latest/meta-data/" rel="noopener noreferrer"&gt;http://169.254.169.254/latest/meta-data/&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;curl &lt;a href="http://169.254.169.254/latest/user-data/" rel="noopener noreferrer"&gt;http://169.254.169.254/latest/user-data/&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know EFS Features&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Supports NFSv4.1 protocol&lt;/li&gt;
&lt;li&gt;Only pay for storage used&lt;/li&gt;
&lt;li&gt;Can scale to petabytes&lt;/li&gt;
&lt;li&gt;Can support thousands of concurrent NFS connections&lt;/li&gt;
&lt;li&gt;Data stored across multiple AZ's&lt;/li&gt;
&lt;li&gt;Read After Write Consistency&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know Lambda&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Event-driven compute service&lt;/li&gt;
&lt;li&gt;Compute service, run code in response to requests&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Know Placement Groups (assume clustered is implied, if not mentioned)&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Clustered Placement Groups
--- Always in one AZ, used for Big Data (low latency, high throughput)&lt;/li&gt;
&lt;li&gt;Spread Placement Groups
--- Important EC2 instance on separate hardware&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;KNOW Elastic Container Service (ECS)&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;S3 Exam Tips&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;S3 is object-based&lt;/li&gt;
&lt;li&gt;Files can be 0B to 5TB&lt;/li&gt;
&lt;li&gt;Unlimited storage&lt;/li&gt;
&lt;li&gt;Files are stored in Buckets&lt;/li&gt;
&lt;li&gt;Universal namespace, names must be unique&lt;/li&gt;
&lt;li&gt;Read after Write consistency for PUTS of new objects
--- Immediately able to read object&lt;/li&gt;
&lt;li&gt;Eventual Consistency for overwrite PUTS and DELETES (take time to propagate)
--- If you update object and then try to read you may get the old object&lt;/li&gt;
&lt;li&gt;Writing to S3 returns HTTP200 for successful write&lt;/li&gt;
&lt;li&gt;Loading files is faster when multipart upload is enabled&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Route53 exam tips&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;You can only resolve an ELB by going to its DNS name&lt;/li&gt;
&lt;li&gt;ELB never has IPv4 address, only DNS names&lt;/li&gt;
&lt;li&gt;Understand difference between Alias Record and CName Record&lt;/li&gt;
&lt;li&gt;Given the choice, always choose Alias Record over a CName Record&lt;/li&gt;
&lt;li&gt;Understand routing policies and their use cases&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;VPC exam tips&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Think of VPC as logical datacenter in AWS&lt;/li&gt;
&lt;li&gt;Consists of IGW's (or virtual private gateways), route tables, NACL's, Subnets, Security Groups&lt;/li&gt;
&lt;li&gt;1 subnet = 1 AZ&lt;/li&gt;
&lt;li&gt;Security Groups are stateful; NACL's are Stateless&lt;/li&gt;
&lt;li&gt;NO TRANSITIVE PEERING&lt;/li&gt;
&lt;li&gt;If you need to access resources from another AWS account, you need to perform a VPC peering between both accounts. &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Load balancer tips&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;3 types of Load Balancers
--- Application Load Balancers (layer 7)
--- Network Load Balancers (layer 4)
--- Classic Load Balancers (layer 7 and layer 4)&lt;/li&gt;
&lt;li&gt;504 means the gateway has timed out. This means the application not responding within the idle timeout period.
--- Troubleshoot. Is it the web server or the database server?&lt;/li&gt;
&lt;li&gt;If you need IPv4 address of end user, look for the X-Forwarded-For header&lt;/li&gt;
&lt;li&gt;Instances monitored by ELB are reported as InService or OutofService&lt;/li&gt;
&lt;li&gt;Healthchecks instance by talking to it&lt;/li&gt;
&lt;li&gt;ELBs have their own DNS name&lt;/li&gt;
&lt;li&gt;Read FAQ&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Exam Tips&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;ELB do not have pre-defined IPv4 addresses, must resolve using DNS name&lt;/li&gt;
&lt;li&gt;Understand the difference between Alias Record and CName Record&lt;/li&gt;
&lt;li&gt;Given the choice, always choose an Alias Record over a CName Record. Roles are not tied to a specific region (neither are users)
Can apply roles to running instances
If you apply a role to an instance, there's no need to configure the Access Keys / Secret keys to get permissions to use AWS Services (i.e., to access a private S3 bucket) --&amp;gt; MORE SECURE. Identity Access Management (IAM) - Allows you to manage users and their level of access to the AWS Console.&lt;/li&gt;
&lt;li&gt;Centralized control of AWS account&lt;/li&gt;
&lt;li&gt;Shared access to AWS account&lt;/li&gt;
&lt;li&gt;Granular permissions&lt;/li&gt;
&lt;li&gt;Identity Federation (AD, FB, LinkedIn, etc.)&lt;/li&gt;
&lt;li&gt;Multifactor Authentication&lt;/li&gt;
&lt;li&gt;Provide temporary access for users/devices/services&lt;/li&gt;
&lt;li&gt;Allows you to setup password rotation policy&lt;/li&gt;
&lt;li&gt;Integrates with many services&lt;/li&gt;
&lt;li&gt;Supports PCI DSS Compliance&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Critical Terms&lt;/strong&gt;&lt;br&gt;
Users - End users&lt;br&gt;
Groups - Collection of users under one set of permissions (Admins, HR, etc.)&lt;br&gt;
Roles - Create roles and assign them to AWS resources (i.e. giving EC2 instance role for writing to EC2)&lt;br&gt;
Policies - Document that defines one or more permissions. Apply policies to users, groups, and roles&lt;/p&gt;

&lt;p&gt;IAM does not use region concept. &lt;/p&gt;

&lt;p&gt;You can create cross-account roles (ie, you hire a company to perform audit, the user that you provide to the auditor can be cross-account)&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Never use your root account for daily use.&lt;/strong&gt; ALWAYS create new users&lt;/p&gt;

&lt;p&gt;Remember: Add user confirmation window (where the security and access key is shown) is only displayed ONCE. If you lose access, you will have to regenerate the keys.&lt;br&gt;
&lt;strong&gt;Kinesis&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Kinesis Stream&lt;/li&gt;
&lt;li&gt;Kinesis Firehose&lt;/li&gt;
&lt;li&gt;Kinesis Analytics&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Kinesis Streams&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;data stored for 24 hours by default&lt;/li&gt;
&lt;li&gt;data stored in shards&lt;/li&gt;
&lt;li&gt;data consumers (ec2 instances) turn shards into data to analyze&lt;/li&gt;
&lt;li&gt;5 transactions per second for reads, maximum total rate of 2 MB/second up to 1,000 records for writes&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Kinesis Firehose&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Automated&lt;/li&gt;
&lt;li&gt;no dealing with shards&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Kinesis Analytics&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Way of analyzing data in Kinesis using SQL-like queries. Exam Tips:&lt;/li&gt;
&lt;li&gt;Lambda scales out (not up) automatically&lt;/li&gt;
&lt;li&gt;Lambda functions are independent&lt;/li&gt;
&lt;li&gt;Lambda is serverless&lt;/li&gt;
&lt;li&gt;Know which AWS services are serverless&lt;/li&gt;
&lt;li&gt;Lambda functions can trigger other lambda functions&lt;/li&gt;
&lt;li&gt;Lambda is event driven (runs code in response to events)&lt;/li&gt;
&lt;li&gt;Architecture can get complicated, AWS X-ray helps to debug&lt;/li&gt;
&lt;li&gt;Lambda can do things globally&lt;/li&gt;
&lt;li&gt;Know your triggers
--- API Gateway, Alex Skills Kit, IoT, S3, DynamoDB, Cloudwatch, Cloudfront, DynamoDB, etc.&lt;/li&gt;
&lt;li&gt;Code supported: JS - Java - Python, C#, C++&lt;/li&gt;
&lt;li&gt;Pricing: first 1 million hits -&amp;gt; Free. 0.20 USD per million after&lt;/li&gt;
&lt;li&gt;Duration: can't run more than 15 mins (was recently raised, it was 5 mins before)&lt;/li&gt;
&lt;li&gt;The more memory/duration you need the function running, the higher the cost. Load Balancers&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Types of Load Balancers:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Application load balancer (http / https level)&lt;/li&gt;
&lt;li&gt;Classic Network load balancer (TCP))&lt;/li&gt;
&lt;li&gt;Network load balancer (TCP/UDP)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Can be external (accessible via internet) or internal (balancing backend instances behind a subnet, for example)&lt;/p&gt;

&lt;p&gt;Performs health checks&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Unhealthy threshold: number of consecutive checks failed&lt;/li&gt;
&lt;li&gt;Healthy threshold: number of consecutive OK to consider healthy &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Load balancers only have HOSTNAMES, not IP address. This is because if a AZ goes down, it can move to another without problems &lt;br&gt;
 Routing Policies:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Simple
-- default routing policy when you create a new record set. Most commonly used when you have a single resource that performs a given function for your domain (e.g. one web server that serves content for violetfamily.com)
can point to a ELB that will later balance the load between N servers, but it's still pointing to a single item&lt;/li&gt;
&lt;li&gt;Weighted
-- allows you to split your traffic based on different weights assigned&lt;/li&gt;
&lt;li&gt;Latency
-- allows you to route your traffic based on the lowest latency for your end user (region with fastest response time)&lt;/li&gt;
&lt;li&gt;Failover
--- used when you want to create an active/passive setup (e.g. use primary site in US-EAST-1 and secondary DR Site in US-WEST-1).&lt;/li&gt;
&lt;li&gt;Geolocation&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Aliases can point to:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;ELB&lt;/li&gt;
&lt;li&gt;cloudfront&lt;/li&gt;
&lt;li&gt;S3 buckets&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;CNAME: Charged $$$&lt;br&gt;
ALIASES: Free. S3 - Simple Storage Service, provides developers and IT teams with secure, durable, highly-scalable, flat object storage.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Object-based storage:
--- Key
--- Value
--- Version ID
--- Metadata
--- Subresources:
--- --- Access Control Lists
--- --- Torrent&lt;/li&gt;
&lt;li&gt;Unlimited storage&lt;/li&gt;
&lt;li&gt;Files can be 0 Bytes to 5 Terabytes&lt;/li&gt;
&lt;li&gt;Files stored in buckets (basically just folders/logical separation)&lt;/li&gt;
&lt;li&gt;Bucket names have to be unique globally&lt;/li&gt;
&lt;li&gt;Successful upload will receive HTTP200 code&lt;/li&gt;
&lt;li&gt;Read after Write consistency for PUTS of new objects&lt;/li&gt;
&lt;li&gt;Eventual Consistency for overwrite PUTS and DELETES (can take some time)&lt;/li&gt;
&lt;li&gt;Built for 99.99% availability&lt;/li&gt;
&lt;li&gt;Amazon guarantees 99.999999999% durability (unlikely to ever lose a file) (11 9s)&lt;/li&gt;
&lt;li&gt;Tiered storage available&lt;/li&gt;
&lt;li&gt;Lifecycle management&lt;/li&gt;
&lt;li&gt;Versioning&lt;/li&gt;
&lt;li&gt;Encryption&lt;/li&gt;
&lt;li&gt;Secure your data using Access Control Lists and Bucket Policies&lt;/li&gt;
&lt;li&gt;Bucket tags are not inherited to files&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;S3 Storage Tiers&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;S3 Standard:
--- 99.99% available, 99.999999999% durable&lt;/li&gt;
&lt;li&gt;S3 IA (Infrequently Accessed):
--- Data that is accessed less often, but requires rapid access. Lower fee than S3 but charged retrieval fee.&lt;/li&gt;
&lt;li&gt;S3 One Zone IA:
--- Lower-cost option for IA but doesn't require multiple AZ resilience&lt;/li&gt;
&lt;li&gt;Glacier:
--- Super cheap, used for archival only. Retrieval time takes 3-5 hours&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;S3 Charges&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Storage&lt;/li&gt;
&lt;li&gt;Requests&lt;/li&gt;
&lt;li&gt;Storage Management Pricing (tags)&lt;/li&gt;
&lt;li&gt;Data Transfer Pricing (cross-region replication)&lt;/li&gt;
&lt;li&gt;Transfer Acceleration - fast transfers over long distances using CloudFront&lt;/li&gt;
&lt;li&gt;Can configure bucket as Request Pays if you use multiple AWS accounts and multiple buckets that transfer info between them&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;S3 Versioning&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Stores all versions of an object (even if you delete an object)&lt;/li&gt;
&lt;li&gt;Once enabled, versioning cannot be disabled, only suspended&lt;/li&gt;
&lt;li&gt;Integrates with Lifecycle rules&lt;/li&gt;
&lt;li&gt;Versioning's MFA Delete capability, which uses MFA, can be used to provide additional layer of security&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;S3 Cross Region Replication&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Versioning must be enabled on both the source and destination buckets&lt;/li&gt;
&lt;li&gt;Regions must be unique&lt;/li&gt;
&lt;li&gt;Files in an existing bucket are not replicated automatically, all new and updated files will be replicated automatically&lt;/li&gt;
&lt;li&gt;You cannot replicate multiple buckets or daisy chain replication&lt;/li&gt;
&lt;li&gt;Delete markers are replicated&lt;/li&gt;
&lt;li&gt;Deleted individual versions or markers will not be replicated&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;S3 Lifecycle Management&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Can be used with versioning&lt;/li&gt;
&lt;li&gt;Can be applied to current and previous versions&lt;/li&gt;
&lt;li&gt;Transition to IA after 30 days is possible, if file is larger than 128k&lt;/li&gt;
&lt;li&gt;Archive to Glacier after 30 days is possible&lt;/li&gt;
&lt;li&gt;Can permanently delete after N days&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;S3 Security &amp;amp; Encryption&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;All newly created buckets are private by default&lt;/li&gt;
&lt;li&gt;You can setup access control for buckets using bucket policies and ACL&lt;/li&gt;
&lt;li&gt;Buckets can be configured to create access logs which log requests made to the bucket.&lt;/li&gt;
&lt;li&gt;Methods of Encryption
--- In Transit
--- --- SSL/TLS
--- At Rest
--- --- Server Side
--- --- --- S3 Managed Keys SSE-S3
--- --- --- AWS Key Management Service, Managed Keys SSE-KMS
--- --- --- Server Side Encryption with Customer Provided Keys SSE-C
--- --- Client Side Encryption&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;S3 Transfer Acceleration&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Uses CloudFront Edge Network to accelerate your uploads to S3&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;S3 Static Website Hosting&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;[bucketname].s3-website-[region].amazonaws.com&lt;/li&gt;
&lt;li&gt;CORS: you can enable cors on the bucket to allow other sites to get the files from the bucket&lt;/li&gt;
&lt;li&gt;If you want to host a static website in S3, just create a bucket name with the URL (IE, if you want to host something.com, create a bucket name with that name) and create an alias to that bucket. &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Dualstack: support for IPV4 and IPV6&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Storage Tiers&lt;/strong&gt;&lt;br&gt;
--- S3 Standard&lt;br&gt;
--- --- 99.99 available, 99.999999999 durable, designed to sustain loss of 2 facilities concurrently&lt;br&gt;
--- S3 IA (Infrequently Accessed)&lt;br&gt;
--- --- Accessed less frequently, requires rapid access when needed. Lower fee than S3 but charged for retrieval.&lt;br&gt;
--- S3 One Zone IA&lt;br&gt;
--- --- Want lower-cost for infrequent data but doesn't require multiple AZ resiliency&lt;br&gt;
--- Glacier&lt;br&gt;
--- --- Cheap, used for archival only. 3-5 hour retrieval time&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Core Fundamentals of S3:&lt;/strong&gt;&lt;br&gt;
--- Key&lt;br&gt;
--- Value&lt;br&gt;
--- Version ID&lt;br&gt;
--- Metadata&lt;br&gt;
--- Subresources&lt;br&gt;
---- Access Control Lists&lt;br&gt;
---- Torrent file&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Versioning&lt;/em&gt;&lt;br&gt;
---  Objected based storage (files only, not OS or db)&lt;br&gt;
---  All version of object are stored, writes and deletes&lt;br&gt;
---  Once enabled, versioning cannot be disabled, only suspended&lt;br&gt;
---  Integrates with Lifecycle rules&lt;br&gt;
---  Versioning's MFA Delete capability can be used to provide additional layer of security&lt;br&gt;
---  Cross Region Replication, requires versioning on source and destination buckets&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Lifecycle Management&lt;/em&gt;&lt;br&gt;
--- Can be used in conjunction with versioning&lt;br&gt;
--- Can be applied to current/previous versions&lt;br&gt;
--- Actions that can be done:&lt;br&gt;
--- --- Transition to Standard S3 IA after 30 days&lt;br&gt;
--- --- Archive to Glacier after 30 days&lt;br&gt;
--- --- Permanently Delete&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;CloudFront&lt;/strong&gt;&lt;br&gt;
--- Edge Location - location where content will be cached&lt;br&gt;
--- Origin - Origin of all files that CDN will distribute&lt;br&gt;
--- Distribution - name given to CDN which consists of collection of Edge Locations&lt;br&gt;
--- --- Web Distribution - typically used for websites&lt;br&gt;
--- --- RTMP Distributions - media streaming/flash files&lt;br&gt;
--- Edge locations are not just read only, you can write to them too&lt;br&gt;
--- Objects are cached for life of TTL (default 24 hours)&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Securing Buckets&lt;/strong&gt;&lt;br&gt;
--- Newly created buckets are private by default&lt;br&gt;
--- You can setup access control using:&lt;br&gt;
--- --- Bucket Policies&lt;br&gt;
--- --- Access Control Lists&lt;br&gt;
--- Buckets can be configured to create access logs&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Encryption&lt;/strong&gt;&lt;br&gt;
--- In Transit&lt;br&gt;
--- --- SSL/TLS&lt;br&gt;
--- At Rest&lt;br&gt;
--- --- Server Side Encryption&lt;br&gt;
--- --- --- S3 Managed Keys SSE-S3&lt;br&gt;
--- --- --- AWS Key Management Service, Managed Keys SSE-KMS&lt;br&gt;
--- --- --- Server Side Encryption with Customer Provided Keys SSE-C&lt;/p&gt;

&lt;h1&gt;
  
  
  Storage Gateways
&lt;/h1&gt;

&lt;p&gt;File Gateway - flat files, directly on S3&lt;br&gt;
--- Volume Gateway&lt;br&gt;
--- --- Stored Volumes - Entire dataset stored on site, async backed up to S3. Stores data as Amazon EBS snapshots in S3&lt;br&gt;
--- --- Cached Volumes - Entire dataset stored in S3, most recent data stored onsite&lt;br&gt;
--- Gateway Virtual Tape Library (VTL) - Used for backup and uses popular backup applications like NetBackup, Backup Exec, Veeam, etc.&lt;br&gt;
-- Network requirements: Port 443, 80 (activation only) , 3260 (iSCSI targets), UPD53 (dns)&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Snowball&lt;/strong&gt;&lt;br&gt;
--- Import to S3 or Export from S3&lt;br&gt;
--- Snowball&lt;br&gt;
--- --- 80TB, no compute&lt;br&gt;
--- Snowball Edge&lt;br&gt;
--- --- 100TB, has compute&lt;br&gt;
--- Snowmobile&lt;br&gt;
--- --- 100PB, semi-truck, only available in USA&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;S3 Transfer Acceleration&lt;/strong&gt;&lt;br&gt;
--- Speed up transfers to S3 using S3 transfer acceleration. Costs extra, great impact for people in distant locations.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;S3 Static websites&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Serverless&lt;/li&gt;
&lt;li&gt;Cheap, scales automatically&lt;/li&gt;
&lt;li&gt;Static only, no compute. Security Token Service (STS)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Grants users limited and temporary access to AWS services&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Federation (typically Active Directory) -- No need to create IAM accounts. Single sign on the AWS console. Combine users from 1 domain with users from another domain&lt;/li&gt;
&lt;li&gt;Federation with mobile apps - FB, AMZ, Google or other OpenID providers&lt;/li&gt;
&lt;li&gt;Cross Account Access: users from other AWS accounts&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Identity Broker: Service that allows to take identity from A and join it (federate it) to B. Impersonate.&lt;/p&gt;

&lt;p&gt;Identity Store: FB, Google, Active Directory, etc, all store the identity.&lt;/p&gt;

&lt;p&gt;Identity: the user itself.&lt;/p&gt;

&lt;p&gt;Identity broker needs to be programmed.&lt;br&gt;
The temp token returned by IAM Policy is valid between 1 to 36 hours&lt;br&gt;
STS returns 4 values if successful:&lt;br&gt;
1) access key&lt;br&gt;
2) secret access key&lt;br&gt;
3) token&lt;br&gt;
4) duration&lt;/p&gt;

&lt;p&gt;Steps:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Develop identity broker to communicate with LDAP &amp;amp; AWS&lt;/li&gt;
&lt;li&gt;Identity broker (IB) &lt;em&gt;ALWAYS&lt;/em&gt; authenticate with LDAP first, then AWS
STS

&lt;ol&gt;
&lt;li&gt;App gets temporary access to AWS Resources&lt;/li&gt;
&lt;/ol&gt;


&lt;/li&gt;

&lt;/ol&gt;

&lt;p&gt;&lt;strong&gt;SAML&lt;/strong&gt;: &lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Security Assertion Markup Language&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;Web Identity Federation with mobile apps: you can auth app using things like FB, you need to code it of course&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;ARN&lt;/strong&gt;: &lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Amazon Resource Name&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;code&gt;AssumeRoleWithWebIdentity&lt;/code&gt;: You need to call this method after auth with FB. After that you can access the AWS resources.   &lt;/p&gt;

&lt;p&gt;Snowball - Petabyte-scale data transport solution that uses secure appliances to transfer large amounts of data into and out of AWS. Addresses challenges with large-scale data transfers including high network costs, long transfer times, and security concerns.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Storage only, up to &lt;code&gt;80TB&lt;/code&gt;
Snowball Edge - Snowball but with onboard compute functionality&lt;/li&gt;
&lt;li&gt;Storage up to &lt;code&gt;100TB&lt;/code&gt;
Snowmobile - Snowball for petabyte/exabyte amounts of data&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  - Storage up to &lt;code&gt;100PB&lt;/code&gt;
&lt;/h2&gt;

&lt;p&gt;&lt;strong&gt;Exam Notes&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Understand what Snowball is&lt;/li&gt;
&lt;li&gt;Understand what Import Export is (old name for Snowball)&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Snowball can:&lt;br&gt;
--- import to S3&lt;br&gt;
--- export from S3. SNS&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Notification service. Data type: JSON&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Pub/Sub paradigm&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;PUSH mechanism. Instant&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Push notification&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Deliver SMS or Email, SQS, or any HTTP endpoint&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Message stored redundantly across multiple AZ&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Topic: access point&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Pay as you go&lt;br&gt;
-- 0.50 per 1 million request&lt;br&gt;
-- 0.75 per 100 sms&lt;br&gt;
-- 0.06 per 100,000 HTTP requests&lt;br&gt;
-- 2 usd per 100,000 emails&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Can use different format for different protocols (http/s, email, email json, SQS, lambda)&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Each message contains:&lt;br&gt;
-- Name&lt;br&gt;
-- Type&lt;br&gt;
-- Value&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Topic   -&amp;gt;  Subscriber 1 (http)&lt;br&gt;
    -&amp;gt;     Subscriber 2 (email)&lt;br&gt;
    --&amp;gt;    Subscriber 3 (SQS) &lt;/p&gt;

&lt;p&gt;You can apply a filter policy in a topic subscription (IE, only send critical errors to the managers)&lt;/p&gt;

&lt;h2&gt;
  
  
  SNS Vs SQS
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;SNS is push&lt;/li&gt;
&lt;li&gt;SQS is poll&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;SQS&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Message queue system. Message retained up to 14 days&lt;/li&gt;
&lt;li&gt;256K of text. Billed at 64K chunks&lt;/li&gt;
&lt;li&gt;Does NOT guarantee first in, first out. If you need that, use a FIFO SQS queue&lt;/li&gt;
&lt;li&gt;SQS pulls information&lt;/li&gt;
&lt;li&gt;Supports auto scaling&lt;/li&gt;
&lt;li&gt;Visibility timeout window: 12 hours max, default is 30 secs&lt;/li&gt;
&lt;li&gt;"At least once" -- each message is delivered at least once but maybe more. Keep that in mind&lt;/li&gt;
&lt;li&gt;First million SQS hits: free. 0.50 USD per million, per month, after that&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Single request can have from 1 to 10 messages.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Change visibility timeout with the "ChangeMessageVisibility" method&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Enable LONG POLLING (20 secs) to wait for a message to become available. Raise an event as soon as a message arrives, good for saving money/requests.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Has SNS integration, SQS subscribes to SNS topic. When SNS msg arrives, distribute msg to subscribed SQS queues. 1 SNS -&amp;gt; N SQS&lt;br&gt;
Storage Gateway - Service that connects an on-premise software appliance with cloud-based storage to provide seamless and secure integration between an organization's IT environment and AWS storage infrastructure.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Types of Storage Gateways&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;File Gateway (NFS)&lt;/li&gt;
&lt;li&gt;Volumes Gateway (iSCSI)
--- Stored Volumes
--- Cached Volumes&lt;/li&gt;
&lt;li&gt;Tape Gateway (VTL)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;File Gateway - Files are stored as objects in your S3 buckets, accessed through NFS mount point. Once files are transferred they can be managed as native S3 objects.&lt;br&gt;
Volume gateway - Presents your applications with disk volumes using iSCSI block protocol.&lt;br&gt;
--- Can be asynchronously backed up as point-in-time snapshots of your volumes&lt;br&gt;
--- Stored in cloud as EBS snapshots&lt;br&gt;
--- Stored Volumes&lt;br&gt;
--- --- Store entire copy locally, asynchronously backup to AWS. Complete copy of data kept on-site.&lt;br&gt;
--- Cached Volumes&lt;br&gt;
--- --- S3 is primary data storage and store only most recent copy locally. Don't need large storage arrays locally.&lt;br&gt;
Tape Gateway - Durable, cost-effective solution to use existing tape-based backup solution, virtual tapes are sent to and stored in S3.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Exam Study Notes&lt;/strong&gt;&lt;br&gt;
File Gateway - for flat files, stored directly on S3&lt;br&gt;
Volume Gateway:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Stored Volumes - Entire dataset stored onsite and async backed up to S3&lt;/li&gt;
&lt;li&gt;Cached Volumes - Entire dataset stored in S3 and most recent data cached onsite
Gateway Virtual tape Library (VTL):&lt;/li&gt;
&lt;li&gt;Used for backup and uses popular backup applications like NetBackup, Backup Exec, Veeam, etc.SWF (Simple WorkFlow)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Workflow system&lt;/p&gt;

&lt;p&gt;Actor:&lt;br&gt;
        application to start/initiate workflow&lt;br&gt;
            could be website or mobile app, for example&lt;/p&gt;

&lt;p&gt;Worker: program/person that interacts with WF&lt;br&gt;
        Get task&lt;br&gt;
        Process received task&lt;br&gt;
        Return result&lt;/p&gt;

&lt;p&gt;Decider:&lt;br&gt;
        control coordination tasks&lt;br&gt;
        Ordering, concurrency, scheduling&lt;/p&gt;

&lt;p&gt;A task is designated once and never duplicated&lt;/p&gt;

&lt;p&gt;Domain:&lt;br&gt;
    Container where your WF runs&lt;br&gt;
    Isolate set of types, executions, and task lists from others in same account&lt;br&gt;
    Format: JSON&lt;/p&gt;

&lt;p&gt;A workflow can run for ONE YEAR (measured in seconds)&lt;/p&gt;

&lt;p&gt;Difference between SQS and SWF&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;SWF:&lt;/strong&gt;&lt;br&gt;
    * Task oriented API&lt;br&gt;
    * Task runs 1 (never duplicated)&lt;br&gt;
    * Keeps track tasks and events&lt;br&gt;
    * Human interaction if needed (ie, "Pick item from the storage")&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;SQS&lt;/strong&gt;&lt;br&gt;
    * Message oriented API&lt;br&gt;
    * Message might be duplicated&lt;br&gt;
    * Implement manual app trackingSolutions Architect - Associate (Understand VPC's inside and out)&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Analytics&lt;/li&gt;
&lt;li&gt;Management Tools&lt;/li&gt;
&lt;li&gt;Migration&lt;/li&gt;
&lt;li&gt;Compute&lt;/li&gt;
&lt;li&gt;Desktop &amp;amp; App Streaming&lt;/li&gt;
&lt;li&gt;Application Integration&lt;/li&gt;
&lt;li&gt;Security, Identity &amp;amp; Compliance&lt;/li&gt;
&lt;li&gt;Networking &amp;amp; Content Delivery&lt;/li&gt;
&lt;li&gt;Storage&lt;/li&gt;
&lt;li&gt;Databases&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Security Group&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;operates at the instance level&lt;/li&gt;
&lt;li&gt;supports allow rules only&lt;/li&gt;
&lt;li&gt;stateful&lt;/li&gt;
&lt;li&gt;evaluate all rules before deciding whether to allow traffic&lt;/li&gt;
&lt;li&gt;applies to an instance onlyExam Tips&lt;/li&gt;
&lt;li&gt;Cannot enable flow logs for VPC's that are peered with your VPC unless the peer VPC is in your account&lt;/li&gt;
&lt;li&gt;you cannot tag a flow log&lt;/li&gt;
&lt;li&gt;after flow log is created, you cannot change its configuration&lt;/li&gt;
&lt;li&gt;not all IP traffic is monitored&lt;/li&gt;
&lt;li&gt;traffic from 169.254.169.254 not monitored&lt;/li&gt;
&lt;li&gt;DHCP traffic not monitored&lt;/li&gt;
&lt;li&gt;traffic to reserved IP address for VPC router is not monitoredExam Tips&lt;/li&gt;
&lt;li&gt;NAT is used to provide internet traffic to EC2 instances in private subnets&lt;/li&gt;
&lt;li&gt;Bastion is used to securely administer EC2 in private subnets (jump box)NAT gateways&lt;/li&gt;
&lt;li&gt;Preferred by the enterprise&lt;/li&gt;
&lt;li&gt;Scale automatically up to 10Gbps&lt;/li&gt;
&lt;li&gt;No need to patch&lt;/li&gt;
&lt;li&gt;Not associated with security groups&lt;/li&gt;
&lt;li&gt;Automatically assigned a public ip address&lt;/li&gt;
&lt;li&gt;Remember to update route tables and point to NAT Gateways&lt;/li&gt;
&lt;li&gt;No need to disable source/destination checks&lt;/li&gt;
&lt;li&gt;More secure than a NAT instanceVPC&lt;/li&gt;
&lt;li&gt;Logically isolated section of the AWS Cloud where you can launch AWS resources in a virtual network that you define.&lt;/li&gt;
&lt;li&gt;Virtual Data Center in the cloud&lt;/li&gt;
&lt;li&gt;Amazon provides you a default VPC in every region when you create your account&lt;/li&gt;
&lt;li&gt;Can create hardware VPN between corporate datacenter and your VPC, make AWS your extension&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;What can be done:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Launch instances into subnet of choice&lt;/li&gt;
&lt;li&gt;Assign custom IP address range to each subnet&lt;/li&gt;
&lt;li&gt;Configure route tables between subnets&lt;/li&gt;
&lt;li&gt;Create internet gateway and attach it to our VPC, one per VPC&lt;/li&gt;
&lt;li&gt;Better security control over AWS resources&lt;/li&gt;
&lt;li&gt;Instance security groups&lt;/li&gt;
&lt;li&gt;Subnet Access Control Lists (ACLs)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Route Tables determine if a subnet is REACHABLE&lt;br&gt;
Network ACL determines if traffic CAN ENTER a subnet &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Default VPC Vs. Custom VPC&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Default VPC is user friendly, allows you to immediately deploy&lt;/li&gt;
&lt;li&gt;All subnets in default VPC have a route out to the internet&lt;/li&gt;
&lt;li&gt;Each EC2 instance has public and private IP address&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;VPC Peering&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Allows you to connect one VPC with another via a direct network route using private IP Addresses&lt;/li&gt;
&lt;li&gt;Instances behave as if they were on the same private network&lt;/li&gt;
&lt;li&gt;You can peer VPC's with other AWS accounts as well as with other VPC's in same account&lt;/li&gt;
&lt;li&gt;Peering is in star configuration: 1 central VPC peer with 4 others---no transitive peering&lt;/li&gt;
&lt;li&gt;Use C5 or M5 instances for VPC peering&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Bastion hosts&lt;/strong&gt;&lt;br&gt;
    Bastion hosts are used to securely administer EC2 instances via SSH or RDP. Can also be called jump box. More secure than opening all your servers to the world to SSH/RDP&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;NAT Instances&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;When creating a NAT instance, disable source/destination check on the instance&lt;/li&gt;
&lt;li&gt;NAT instances must be in a public subnet&lt;/li&gt;
&lt;li&gt;There must be a route out of the private subnet to the NAT instance for this to work&lt;/li&gt;
&lt;li&gt;The amount of traffic that NAT instances can support depends on the instance size&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;You can create HA using:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Autoscaling Groups&lt;/li&gt;
&lt;li&gt;Multiple subnets in different AZs&lt;/li&gt;
&lt;li&gt;Script to automate failover&lt;/li&gt;
&lt;li&gt;Behind Security groups&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;NAT gateways&lt;/strong&gt;&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Preferred by the enterprise&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Scale automatically up to 10Gbps&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;No need to patch&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Not associated with security groups&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Automatically assigned a public ip address&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;Remember to update route tables and point to NAT Gateways&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;No need to disable source/destination checks&lt;/p&gt;&lt;/li&gt;

&lt;li&gt;&lt;p&gt;More secure than a NAT instance&lt;/p&gt;&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Difference between NAT Gateway and Internet Gateway&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Both are highly available architectures &lt;/li&gt;
&lt;li&gt;Both are used to enable instances in a private subnet to connect to the internet or other AWS services&lt;/li&gt;
&lt;li&gt;An Internet Gateway (IGW) allows resources within your VPC to access the internet, &lt;strong&gt;and vice versa&lt;/strong&gt;. 

&lt;ul&gt;
&lt;li&gt;In order for this to happen, there needs to be a routing table entry allowing a subnet to access the IGW.&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;NAT Gateway is only from instance to internet (you can download things from the EC2, but internet can't access the server).

&lt;ul&gt;
&lt;li&gt;The internet at large cannot get through your NAT to your private resources unless you explicitly allow it.&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;NAT Gateway can only scale up to 45 Gbps. Keep in mind if bandwidth is an issue.&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Network ACL's&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;VPC automatically comes with default network ACL and by default allows all in/outbound traffic&lt;/li&gt;
&lt;li&gt;You can create custom network ACL's. By default each network ACL denies all in/outbound traffic&lt;/li&gt;
&lt;li&gt;Each subnet in VPC must be associated with a network ACL, uses default ACL by default&lt;/li&gt;
&lt;li&gt;You can associate network ACL with multiple subnets, however subnet can only associate with one ACL at a time&lt;/li&gt;
&lt;li&gt;Adding subnet to a second ACL will automatically remove it from the previous ACL&lt;/li&gt;
&lt;li&gt;network ACL contains numbered list of rules that is evaluated in order, starting with lowest number&lt;/li&gt;
&lt;li&gt;network ACL always have separate inbound and outbound rules&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;network ACL's are stateless&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;VPC Interface Endpoints
-- API Gateway, Cloudwatch, Config, Kinesis, SNS, etc.&lt;/li&gt;
&lt;li&gt;VPC Gateway Endpoints
This is used so the traffic does not go out to the Internet and back in (remains in the private network, faster and more secure)
-- S3 
-- DynamoDb&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

</description>
      <category>aws</category>
      <category>certified</category>
      <category>awscsaa</category>
      <category>study</category>
    </item>
    <item>
      <title>CapitalOne AWS breach &amp; AWS security discussion</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Tue, 30 Jul 2019 17:04:56 +0000</pubDate>
      <link>https://dev.to/dietertroy/capitalone-aws-breach-aws-security-discussion-mo9</link>
      <guid>https://dev.to/dietertroy/capitalone-aws-breach-aws-security-discussion-mo9</guid>
      <description>&lt;p&gt;CapitalOne was recently victim to a leak that &lt;a href="https://www.bloomberg.com/news/articles/2019-07-29/capital-one-data-systems-breached-by-seattle-woman-u-s-says"&gt;may have affected 100 million individuals&lt;/a&gt; in the US. This post will not go into the unethical intent of the accused or the affect of private information being leaked, but rather the technical aspects and securing your AWS environment. If you'd like to read into it further, &lt;a href="https://regmedia.co.uk/2019/07/29/capital_one_paige_thompson.pdf"&gt;this document&lt;/a&gt; does a fair job at explaining it.&lt;/p&gt;

&lt;p&gt;The diagram below is my understanding of how the CapitalOne S3 data was compromised. Background information about the accused also states &lt;a href="https://imgur.com/NezWVKw"&gt;that she worked for Amazon Web Services&lt;/a&gt; from 2015-2016 in the S3 division. Knowing that she worked for Amazon Web Services for a period of time very well may have affected her ability to compromise CapitalOne.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--PxnhWr-0--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/static/cap1_compromise.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--PxnhWr-0--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://www.troydieter.com/static/cap1_compromise.png" alt="diagram"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;There seems to be still unanswered questions, such as:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;What credentials were exploited for the accused to assume the STS role to gain access?&lt;/li&gt;
&lt;li&gt;Why wasn't AWS GuardDuty's &lt;a href="https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_upload_lists.html"&gt;trusted list&lt;/a&gt; being maintained?&lt;/li&gt;
&lt;li&gt;Why was the STS role that was assumed allowed access to the S3 bucket? A &lt;a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html"&gt;strict bucket policy&lt;/a&gt; with allowed ARN's should've been used.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;What is your opinion on the technical aspect of the exploitation and securing an AWS environment?&lt;/p&gt;

</description>
      <category>aws</category>
      <category>sec</category>
      <category>netsec</category>
      <category>iam</category>
    </item>
    <item>
      <title>draw.io with Git source control management?</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Thu, 25 Jul 2019 13:56:28 +0000</pubDate>
      <link>https://dev.to/dietertroy/draw-io-with-git-source-control-management-1pcl</link>
      <guid>https://dev.to/dietertroy/draw-io-with-git-source-control-management-1pcl</guid>
      <description>&lt;p&gt;I'm a huge fan of &lt;a href="https://www.draw.io" rel="noopener noreferrer"&gt;draw.io&lt;/a&gt; - a tool used to draw network topology, UML, architecture &amp;amp; more. Our team uses the tool pretty heavily, albeit independent to each other and not necessarily sharing.&lt;/p&gt;

&lt;p&gt;I had a thought.. you have the ability to export as .xml. Each user edits their diagram, exports as .&lt;em&gt;xml&lt;/em&gt; (to device) and adds to the SCM repository. If another user wants to edit it, they check-out the branch.. make their edits and check in/add a pull request/etc.&lt;/p&gt;

&lt;p&gt;Quick diagram using draw.io:&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fedit_drawio_process.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fedit_drawio_process.png" alt="diagram"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;draw.io + SCM = &lt;strong&gt;success&lt;/strong&gt;? &lt;/p&gt;

&lt;p&gt;Has anyone else used this type of combination?&lt;/p&gt;

</description>
      <category>diagram</category>
      <category>scm</category>
      <category>git</category>
      <category>bitbucket</category>
    </item>
    <item>
      &lt;title&gt;Discussion: Why doesn't Netflix, one of the most shared streaming services, offer 2FA (two factor authentication)?&lt;/title&gt;
      <dc:creator>Troy</dc:creator>
      <pubDate>Mon, 22 Jul 2019 01:33:18 +0000</pubDate>
      <link>https://dev.to/dietertroy/discussion-why-doesn-t-netflix-one-of-the-most-shared-streaming-services-offer-2fa-two-factor-authentication-3c56</link>
      <guid>https://dev.to/dietertroy/discussion-why-doesn-t-netflix-one-of-the-most-shared-streaming-services-offer-2fa-two-factor-authentication-3c56</guid>
      <description>&lt;p&gt;Netflix, one of the largest streaming services in the world maintains millions of subscribers a year. This post doesn't cover the content or the subscribers, but rather poses an excellent question. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Netflix: why no 2FA for the login process?!&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;2FA, also known as multi-factor authentication or two factor authentication provides an additional layer of security for an authentication mechanism. &lt;br&gt;
WIKI definition:&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;&lt;strong&gt;Multi-factor authentication&lt;/strong&gt;  (&lt;strong&gt;MFA&lt;/strong&gt;) is an  &lt;a href="https://en.wikipedia.org/wiki/Authentication"&gt;authentication&lt;/a&gt;  method in which a  &lt;a href="https://en.wikipedia.org/wiki/Computer_user"&gt;computer&lt;br&gt;
user&lt;/a&gt;  is&lt;br&gt;
granted access only after successfully presenting two or more pieces&lt;br&gt;
of evidence (or factors) to an &lt;br&gt;
&lt;a href="https://en.wikipedia.org/wiki/Authentication"&gt;authentication&lt;/a&gt;  mechanism: knowledge (something the user and only&lt;br&gt;
the user knows), possession (something the user and only the user&lt;br&gt;
has), and inherence (something the user and only the user&lt;br&gt;
is).&lt;a href="https://en.wikipedia.org/wiki/Multi-factor_authentication#cite_note-1"&gt;[1]&lt;/a&gt;&lt;a href="https://en.wikipedia.org/wiki/Multi-factor_authentication#cite_note-2"&gt;[2]&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Two-factor authentication&lt;/strong&gt;  (also known as  &lt;strong&gt;2FA&lt;/strong&gt;) is a type, or subset, of multi-factor authentication. It is a method of confirming&lt;br&gt;
users' claimed identities by using a combination of  &lt;em&gt;two&lt;/em&gt;  different&lt;br&gt;
factors: 1) something they know, 2) something they have, or 3)&lt;br&gt;
something they are.&lt;/p&gt;

&lt;p&gt;A good example of two-factor authentication is the withdrawing of&lt;br&gt;
money from an &lt;br&gt;
&lt;a href="https://en.wikipedia.org/wiki/Automated_teller_machine"&gt;ATM&lt;/a&gt;; only the correct combination of a  &lt;a href="https://en.wikipedia.org/wiki/Bank_card"&gt;bank&lt;br&gt;
card&lt;/a&gt;  (something&lt;br&gt;
the user possesses) and a &lt;br&gt;
&lt;a href="https://en.wikipedia.org/wiki/Personal_identification_number"&gt;PIN&lt;/a&gt;  (something the user knows) allows&lt;br&gt;
the transaction to be carried out.&lt;/p&gt;

&lt;p&gt;Two other examples are to supplement a user-controlled password with a&lt;br&gt;
&lt;a href="https://en.wikipedia.org/wiki/One-time_password"&gt;one-time password&lt;/a&gt;  (OTP) or code generated or received by an &lt;br&gt;
&lt;a href="https://en.wikipedia.org/wiki/Authenticator"&gt;authenticator&lt;/a&gt;  (e.g. a security token or smartphone) that only the&lt;br&gt;
user&lt;br&gt;
possesses.&lt;a href="https://en.wikipedia.org/wiki/Multi-factor_authentication#cite_note-IOVATION-3"&gt;[3]&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Two-step verification&lt;/strong&gt;  or  &lt;strong&gt;two-step authentication&lt;/strong&gt;  is a method of confirming a user's claimed identity by utilizing something&lt;br&gt;
they know (password) and a second factor  &lt;em&gt;other&lt;/em&gt;  than something they&lt;br&gt;
have or something they are. An example of a second step is the user&lt;br&gt;
repeating back something that was sent to them through an &lt;br&gt;
&lt;a href="https://en.wikipedia.org/wiki/Out-of-band"&gt;out-of-band&lt;/a&gt;&lt;br&gt;
mechanism. Or, the second step might be a six digit number generated&lt;br&gt;
by an  &lt;a href="https://en.wikipedia.org/wiki/Mobile_app"&gt;app&lt;/a&gt; &lt;br&gt;
that is common to the user and the  &lt;a href="https://en.wikipedia.org/wiki/Authentication_and_Key_Agreement"&gt;authentication&lt;br&gt;
system&lt;/a&gt;.&lt;a href="https://en.wikipedia.org/wiki/Multi-factor_authentication#cite_note-4"&gt;[4]&lt;/a&gt;&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;Netflix does not currently offer any forms of the above security. Why? Many claim that the engineering effort would not be worth it, or that there is no private information to protect. I'd argue these points and state that your:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Mailing address&lt;/li&gt;
&lt;li&gt;Billing address&lt;/li&gt;
&lt;li&gt;Last four of your credit card or PayPal (or billing method)&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;would be considered private information among many. Unauthorized sharing of Netflix credentials is rampant; 2FA would assist with preventing this. &lt;/p&gt;

&lt;p&gt;The question is -- &lt;strong&gt;what's your take on why Netflix has yet to implement increased security measures for its users? Why no 2FA?&lt;/strong&gt;&lt;/p&gt;

</description>
      <category>netflix</category>
      <category>sec</category>
      <category>twofactorauth</category>
      <category>netsec</category>
    </item>
    <item>
      <title>Diagramming your AWS VPC &amp; IAM structure</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Mon, 22 Jul 2019 00:52:01 +0000</pubDate>
      <link>https://dev.to/dietertroy/diagramming-your-aws-vpc-iam-structure-593c</link>
      <guid>https://dev.to/dietertroy/diagramming-your-aws-vpc-iam-structure-593c</guid>
      <description>&lt;p&gt;The snowball effect for organizations &amp;amp; startups using AWS is a real thing. You may start experimenting with using one of the cloud platforms (AWS in this article) and soon to find out you have &lt;em&gt;quite the labyrinth&lt;/em&gt; of policies, groups, users, access keys and more. This handy tool developed by the security company DUO (now owned by Cisco) will help you untangle the ball of yarn that started with a back &amp;amp; forth between you and the developers.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;&lt;a href="https://github.com/duo-labs/cloudmapper" rel="noopener noreferrer"&gt;DUO CloudMapper&lt;/a&gt;&lt;/strong&gt; is a tool that has grown from originally diagramming your AWS &lt;em&gt;Virtual Private Cloud&lt;/em&gt; to now including IAM reports and much more.&lt;/p&gt;

&lt;p&gt;A few examples of my personal favorite components of the CloudMapper tool are the &lt;a href="https://raw.githubusercontent.com/duo-labs/cloudmapper/master/docs/images/report_resources.png" rel="noopener noreferrer"&gt;resource aggregation&lt;/a&gt; along with the IAM reporting for identifying &lt;a href="https://raw.githubusercontent.com/duo-labs/cloudmapper/master/docs/images/report_findings.png" rel="noopener noreferrer"&gt;policy best-practices and potential&lt;/a&gt; problems.&lt;/p&gt;

&lt;p&gt;Here are a few screenshots from the tool and it's components:&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Fiam_report-inactive_and_detail.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Fiam_report-inactive_and_detail.jpg" alt="principals"&gt;&lt;/a&gt;&lt;br&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Fideal_layout.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Fideal_layout.jpg" alt="layout"&gt;&lt;/a&gt;&lt;br&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Freport_findings.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Freport_findings.jpg" alt="report findings"&gt;&lt;/a&gt;&lt;br&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Freport_findings_summary.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Freport_findings_summary.jpg" alt="report findings summary"&gt;&lt;/a&gt;&lt;br&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Freport_resources.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fduo-cloudmapper%2Fthumbnail%2Freport_resources.jpg" alt="report resources"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;For installation, code repository and more &lt;a href="https://github.com/duo-labs/cloudmapper" rel="noopener noreferrer"&gt;visit the official DUO CloudMapper GitHub page&lt;/a&gt;. Cheers!&lt;/p&gt;

</description>
      <category>aws</category>
      <category>iam</category>
      <category>vpc</category>
      <category>topology</category>
    </item>
    <item>
      <title>AWS Lambda backups of Route53 -&gt; S3</title>
      <dc:creator>Troy</dc:creator>
      <pubDate>Fri, 19 Jul 2019 17:21:48 +0000</pubDate>
      <link>https://dev.to/dietertroy/aws-lambda-backups-of-route53-s3-1id7</link>
      <guid>https://dev.to/dietertroy/aws-lambda-backups-of-route53-s3-1id7</guid>
      <description>&lt;p&gt;Need a way to automatically back up your AWS Route53 public DNS zones? Look no further, as a combination of the following AWS products can fit the need:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Lambda&lt;/li&gt;
&lt;li&gt;Route53&lt;/li&gt;
&lt;li&gt;CloudWatch&lt;/li&gt;
&lt;li&gt;S3&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Here's a diagram of the process:&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fbackup.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fwww.troydieter.com%2Fstatic%2Fbackup.png" alt="process"&gt;&lt;/a&gt;&lt;br&gt;
This will execute a Lambda function every 6 hours (or whichever you set the CloudWatch event to). It will use the IAM role to export your Route53 public zones as a CSV &amp;amp; JSON to the S3 bucket of your choice.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Create a S3 private bucket, as it will be your destination for the backups.&lt;/li&gt;
&lt;li&gt;Set the &lt;em&gt;s3_bucket_name&lt;/em&gt; variable to your AWS S3 bucket name.&lt;/li&gt;
&lt;li&gt;Set the &lt;em&gt;s3_bucket_region&lt;/em&gt; variable to your AWS S3 region.&lt;/li&gt;
&lt;li&gt;Create an IAM role with an attached policy for Route53 read-only and S3 read/write to your S3 Bucket. &lt;a href="https://paste.fedoraproject.org/paste/solHO-oqIIm0vy5vDam9eg" rel="noopener noreferrer"&gt;I have provided an example here.&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;Create a CloudWatch event for every 6 hours (or desired recurring duration).&lt;/li&gt;
&lt;li&gt;Upload the below Lambda Python function (copy and save it as &lt;em&gt;aws_s3_route53.py&lt;/em&gt; for example).&lt;/li&gt;
&lt;li&gt;Assign the execution role to the IAM role created in step 4, and use the scheduled CloudWatch event trigger created in step 5.&lt;/li&gt;
&lt;li&gt;Check the S3 bucket for your backups and verify.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;Now you can sleep a bit more peacefully knowing that when/if you blow out a record-set in your hosted public zone, &lt;strong&gt;&lt;em&gt;you'll have a backup&lt;/em&gt;&lt;/strong&gt;!&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;aws_s3_route53_backups.py&lt;/strong&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;"""AWS Route 53 Lambda Backup"""

import os
import csv
import json
import time
from datetime import datetime
import boto3
from botocore.exceptions import ClientError


# Set environmental variables

s3_bucket_name = ''
s3_bucket_region = ''

try:
    s3_bucket_name = os.environ['s3_bucket_name']
    s3_bucket_region = os.environ['s3_bucket_region']
except KeyError as e:
    print("Warning: Environmental variable(s) not defined")


# Create client objects

s3 = boto3.client('s3', region_name='us-east-1')
route53 = boto3.client('route53')


# Functions

def create_s3_bucket(bucket_name, bucket_region='us-east-1'):
    """Create an Amazon S3 bucket."""
    try:
        response = s3.head_bucket(Bucket=bucket_name)
        return response
    except ClientError as e:
        if(e.response['Error']['Code'] != '404'):
            print(e)
            return None
    # creating bucket in us-east-1 (N. Virginia) requires
    # no CreateBucketConfiguration parameter be passed
    if(bucket_region == 'us-east-1'):
        response = s3.create_bucket(
            ACL='private',
            Bucket=bucket_name
        )
    else:
        response = s3.create_bucket(
            ACL='private',
            Bucket=bucket_name,
            CreateBucketConfiguration={
                'LocationConstraint': bucket_region
            }
        )
    return response


def upload_to_s3(folder, filename, bucket_name, key):
    """Upload a file to a folder in an Amazon S3 bucket."""
    key = folder + '/' + key
    s3.upload_file(filename, bucket_name, key)


def get_route53_hosted_zones(next_zone=None):
    """Recursively returns a list of hosted zones in Amazon Route 53."""
    kwargs = {}
    if next_zone:
        # Continue listing from the (DNSName, HostedZoneId) marker.
        kwargs['DNSName'], kwargs['HostedZoneId'] = next_zone
    response = route53.list_hosted_zones_by_name(**kwargs)
    zones = response['HostedZones']
    # A truncated response carries the marker for the next page; recurse
    # to collect the remaining zones.
    if response['IsTruncated']:
        marker = (response['NextDNSName'], response['NextHostedZoneId'])
        zones = zones + get_route53_hosted_zones(marker)
    return zones


def get_route53_zone_records(zone_id, next_record=None):
    """Recursively returns a list of records of a hosted zone in Route 53."""
    kwargs = {'HostedZoneId': zone_id}
    if next_record:
        # Continue listing from the (record name, record type) marker.
        kwargs['StartRecordName'], kwargs['StartRecordType'] = next_record
    response = route53.list_resource_record_sets(**kwargs)
    records = response['ResourceRecordSets']
    # Page through truncated responses by recursing from the next record.
    if response['IsTruncated']:
        marker = (response['NextRecordName'], response['NextRecordType'])
        records = records + get_route53_zone_records(zone_id, marker)
    return records


def get_record_value(record):
    """Return a list of values for a hosted zone record.

    An alias record is flattened to a single 'ALIAS:zoneid:dnsname'
    string; a plain record yields one entry per resource record.  A
    record carrying neither key now yields an empty list -- the
    original raised an uncaught KeyError from inside its except
    handler when 'ResourceRecords' was also absent.
    """
    alias = record.get('AliasTarget')
    if alias is not None:
        return [':'.join(['ALIAS', alias['HostedZoneId'], alias['DNSName']])]
    return [v['Value'] for v in record.get('ResourceRecords', [])]


def try_record(test, record):
    """Return record[test], or '' when the key is absent or the
    record itself is not subscriptable by string (e.g. it is '')."""
    try:
        return record[test]
    except (KeyError, TypeError):
        return ''


def write_zone_to_csv(zone, zone_records):
    """Write hosted zone records to a csv file in /tmp/.

    Returns the path of the file written.  One row is emitted per
    record value, so a record with multiple values (e.g. MX) spans
    several rows; a record whose value list is empty produces no rows.
    """
    # zone['Name'] ends with a trailing dot, so appending 'csv' yields
    # a name like '/tmp/example.com.csv' without an extra separator.
    zone_file_name = '/tmp/' + zone['Name'] + 'csv'
    # write to csv file with zone name
    with open(zone_file_name, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        # write column headers
        writer.writerow([
            'NAME', 'TYPE', 'VALUE',
            'TTL', 'REGION', 'WEIGHT',
            'SETID', 'FAILOVER', 'EVALUATE_HEALTH'
            ])
        # loop through all the records for a given zone
        for record in zone_records:
            # Fixed 9-column row; optional fields default to ''.
            csv_row = [''] * 9
            csv_row[0] = record['Name']
            csv_row[1] = record['Type']
            csv_row[3] = try_record('TTL', record)
            csv_row[4] = try_record('Region', record)
            csv_row[5] = try_record('Weight', record)
            csv_row[6] = try_record('SetIdentifier', record)
            csv_row[7] = try_record('Failover', record)
            # Nested lookup: the inner call returns '' when the record
            # has no AliasTarget, and the outer call maps that to ''.
            csv_row[8] = try_record('EvaluateTargetHealth',
                try_record('AliasTarget', record)
            )
            value = get_record_value(record)
            # if multiple values (e.g., MX records), write each as its own row
            for v in value:
                csv_row[2] = v
                writer.writerow(csv_row)
    return zone_file_name


def write_zone_to_json(zone, zone_records):
    """Write hosted zone records to a json file in /tmp/."""
    # zone['Name'] ends with a trailing dot, so 'json' appends cleanly
    # to give e.g. '/tmp/example.com.json'.
    path = '/tmp/' + zone['Name'] + 'json'
    with open(path, 'w') as json_file:
        json.dump(zone_records, json_file, indent=4)
    return path


## HANDLER FUNCTION ##

def lambda_handler(event, context):
    """Handler function for AWS Lambda.

    Backs up every Route 53 hosted zone to S3 as both CSV and JSON,
    under a timestamp-named folder per invocation.  Returns True on
    success, or False if the backup bucket cannot be created or
    verified.  The event and context arguments are unused.
    """
    # UTC timestamp used as the per-invocation S3 folder prefix.
    time_stamp = time.strftime("%Y-%m-%dT%H:%M:%SZ",
        datetime.utcnow().utctimetuple()
    )
    # Bail out early when the bucket is unusable (create_s3_bucket
    # returns None on any head_bucket error other than 404).
    if(not create_s3_bucket(s3_bucket_name, s3_bucket_region)):
        return False
    #bucket_response = create_s3_bucket(s3_bucket_name, s3_bucket_region)
    #if(not bucket_response):
        #return False
    hosted_zones = get_route53_hosted_zones()
    for zone in hosted_zones:
        # Drop the zone name's trailing dot for the folder component.
        zone_folder = (time_stamp + '/' + zone['Name'][:-1])
        zone_records = get_route53_zone_records(zone['Id'])
        # Each zone is exported twice: once as CSV, once as JSON; the
        # trailing dot in zone['Name'] doubles as the extension dot.
        upload_to_s3(
            zone_folder,
            write_zone_to_csv(zone, zone_records),
            s3_bucket_name,
            (zone['Name'] + 'csv')
        )
        upload_to_s3(
            zone_folder,
            write_zone_to_json(zone, zone_records),
            s3_bucket_name,
            (zone['Name'] + 'json')
        )
    return True


if __name__ == "__main__":
    # Allow ad-hoc local runs; the event and context values are ignored.
    lambda_handler(0, 0)
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

</description>
      <category>aws</category>
      <category>lambda</category>
      <category>backups</category>
    </item>
  </channel>
</rss>
