47 changes: 23 additions & 24 deletions .github/workflows/s3-integration.yml
@@ -177,27 +177,26 @@ jobs:
region_name: ${{ env.REGION_NAME }}
stack_name: ${{ env.STACK_NAME }}

# TODO: after aws-sdk-go-v2 migration, not working properly. Disabled for now.
# s3-compatible-integration:
# name: S3 Compatible Integration
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v6

# - name: Set up Go
# uses: actions/setup-go@v6
# with:
# go-version-file: go.mod

# - name: Install Ginkgo
# run: go install github.com/onsi/ginkgo/v2/ginkgo@latest

# - name: Run GCS S3 compatible tests
# run: |
# export access_key_id="${{ secrets.GCP_ACCESS_KEY_ID }}"
# export secret_access_key="${{ secrets.GCP_SECRET_ACCESS_KEY }}"
# export bucket_name="storage-cli-test-aws"
# export s3_endpoint_host="https://storage.googleapis.com"
# export s3_endpoint_port="443"
# ./.github/scripts/s3/run-integration-s3-compat.sh
s3-compatible-integration:
name: S3 Compatible Integration
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v6

- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: go.mod

- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo@latest

- name: Run GCS S3 compatible tests
run: |
export access_key_id="${{ secrets.GCP_ACCESS_KEY_ID }}"
export secret_access_key="${{ secrets.GCP_SECRET_ACCESS_KEY }}"
export bucket_name="storage-cli-test-aws"
export s3_endpoint_host="https://storage.googleapis.com"
export s3_endpoint_port="443"
./.github/scripts/s3/run-integration-s3-compat.sh
51 changes: 44 additions & 7 deletions s3/README.md
@@ -22,14 +22,18 @@ The S3 client requires a JSON configuration file with the following structure:
"host": "<string> (optional)",
"port": <int> (optional),

"ssl_verify_peer": <bool> (optional),
"use_ssl": <bool> (optional),
"ssl_verify_peer": <bool> (optional - default: true),
"use_ssl": <bool> (optional - default: true),
"signature_version": "<string> (optional)",
"server_side_encryption": "<string> (optional)",
"sse_kms_key_id": "<string> (optional)",
"multipart_upload": <bool> (optional - default: true)
}
```
> Note: The following provider-specific settings are automatically set to false based on the provided `host` (see the sketch below):
> 1. **multipart_upload** - not supported by Google
> 2. **request_checksum_calculation_enabled** - not supported by Google and AliCloud
> 3. **uploader_checksum_calculation_enabled** - not supported by AliCloud
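
The host-based detection behind these defaults can be pictured roughly as follows. This is a minimal sketch, not the actual `s3/config` code: the Go struct and field names, the helper name, and the substring matching are all assumptions made for illustration.

```go
package config

import "strings"

// S3Cli here is a simplified stand-in for the real config struct;
// the field names are illustrative and may not match s3/config exactly.
type S3Cli struct {
	Host                               string
	MultipartUpload                    bool
	RequestChecksumCalculationEnabled  bool
	UploaderChecksumCalculationEnabled bool
}

// applyProviderDefaults forces the provider-specific flags to false when the
// configured host identifies Google Cloud Storage or AliCloud OSS.
func applyProviderDefaults(c *S3Cli) {
	if strings.Contains(c.Host, "storage.googleapis.com") {
		c.MultipartUpload = false                   // multipart upload not supported by Google
		c.RequestChecksumCalculationEnabled = false // checksum calculation not supported by Google
	}
	if strings.Contains(c.Host, "aliyuncs.com") {
		c.RequestChecksumCalculationEnabled = false  // not supported by AliCloud
		c.UploaderChecksumCalculationEnabled = false // not supported by AliCloud
	}
}
```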

**Usage examples:**
```shell
@@ -59,16 +63,49 @@ ginkgo --skip-package=integration --cover -v -r ./s3/...

### Integration Tests

To run the integration tests, export the following variables into your environment:

#### Setup for AWS
1. To run the integration tests, export the following variables into your environment:
```
export access_key_id=YOUR_AWS_ACCESS_KEY
export access_key_id=<YOUR_AWS_ACCESS_KEY>
export focus_regex="GENERAL AWS|AWS V2 REGION|AWS V4 REGION|AWS US-EAST-1"
export region_name=us-east-1
export s3_endpoint_host=https://s3.amazonaws.com
export secret_access_key=YOUR_SECRET_ACCESS_KEY
export s3_endpoint_host=s3.amazonaws.com
export secret_access_key=<YOUR_SECRET_ACCESS_KEY>
export stack_name=s3cli-iam
export bucket_name=s3cli-pipeline
```
2. Set up the infrastructure with `./.github/scripts/s3/setup-aws-infrastructure.sh`
3. Run the desired tests by executing one or more of the `run-integration-*` scripts in `./.github/scripts/s3` (to run `run-integration-s3-compat`, see [Setup for GCP](#setup-for-gcp) or [Setup for AliCloud](#setup-for-alicloud))
4. Tear down the infrastructure with `./.github/scripts/s3/teardown-infrastructure.sh`

Run `./.github/scripts/s3/setup-aws-infrastructure.sh` and `./.github/scripts/s3/teardown-infrastructure.sh` before and after the `./.github/scripts/s3/run-integration-*` in repo's root folder.
#### Setup for GCP
1. Create a bucket in GCP
2. Create access keys
   1. Navigate to **IAM & Admin > Service Accounts**.
   2. Select your service account, or create a new one if needed.
   3. Ensure your service account has the necessary permissions (such as `Storage Object Creator`, `Storage Object Viewer`, or `Storage Admin`), depending on the access you need.
   4. Go to **Cloud Storage** and select **Settings**.
   5. In the **Interoperability** section, create an HMAC key for your service account. This generates an "access key ID" and a "secret access key".
3. Export the following variables into your environment:
```
export access_key_id=<YOUR_ACCESS_KEY>
export secret_access_key=<YOUR_SECRET_ACCESS_KEY>
export bucket_name=<YOUR_BUCKET_NAME>
export s3_endpoint_host=storage.googleapis.com
export s3_endpoint_port=443
```
4. Run `run-integration-s3-compat.sh` in `./.github/scripts/s3`

#### Setup for AliCloud
1. Create a bucket in AliCloud
2. Create access keys from `RAM -> User -> Create Accesskey`
3. Export the following variables into your environment:
```
export access_key_id=<YOUR_ACCESS_KEY>
export secret_access_key=<YOUR_SECRET_ACCESS_KEY>
export bucket_name=<YOUR_BUCKET_NAME>
export s3_endpoint_host="oss-<YOUR_REGION>.aliyuncs.com"
export s3_endpoint_port=443
```
4. Run `run-integration-s3-compat.sh` in `./.github/scripts/s3`
8 changes: 7 additions & 1 deletion s3/client/aws_s3_blobstore.go
@@ -58,6 +58,12 @@ func (b *awsS3Client) Put(src io.ReadSeeker, dest string) error {
// disable multipart uploads by way of large PartSize configuration
u.PartSize = oneTB
}

if cfg.ShouldDisableUploaderRequestChecksumCalculation() {
// Disable checksum calculation for Alicloud OSS (Object Storage Service)
// Alicloud doesn't support AWS chunked encoding with checksum calculation
u.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
}
})
uploadInput := &s3.PutObjectInput{
Body: src,
@@ -112,7 +118,7 @@ func (b *awsS3Client) Delete(dest string) error {
}

var apiErr smithy.APIError
if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NotFound" {
if errors.As(err, &apiErr) && (apiErr.ErrorCode() == "NotFound" || apiErr.ErrorCode() == "NoSuchKey") {
return nil
}
return err
71 changes: 71 additions & 0 deletions s3/client/middlewares.go
@@ -0,0 +1,71 @@
package client

import (
"context"
"fmt"

v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)

const acceptEncodingHeader = "Accept-Encoding"

type acceptEncodingKey struct{}

func getAcceptEncodingKey(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, acceptEncodingKey{}).(string)
return v
}

func setAcceptEncodingKey(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, acceptEncodingKey{}, value)
}

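// dropAcceptEncodingHeader removes the Accept-Encoding header before the request
// is signed and stashes its value in the middleware stack context.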
var dropAcceptEncodingHeader = middleware.FinalizeMiddlewareFunc("DropAcceptEncodingHeader",
func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &v4.SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)}
}

if ae := req.Header.Get(acceptEncodingHeader); len(ae) > 0 {
ctx = setAcceptEncodingKey(ctx, ae)
req.Header.Del(acceptEncodingHeader)
in.Request = req
}

return next.HandleFinalize(ctx, in)
},
)

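// setAcceptEncodingHeader restores the stashed Accept-Encoding value on the request
// after signing, so the header is still sent but is excluded from the signature.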
var setAcceptEncodingHeader = middleware.FinalizeMiddlewareFunc("SetAcceptEncodingHeader",
func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &v4.SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)}
}

if ae := getAcceptEncodingKey(ctx); len(ae) > 0 {
req.Header.Set(acceptEncodingHeader, ae)
in.Request = req
}

return next.HandleFinalize(ctx, in)
},
)

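// AddFixAcceptEncodingMiddleware wires the two middlewares around the "Signing"
// finalize step so that Accept-Encoding is dropped before signing and re-added
// afterwards. It is a no-op when no signing step is present in the stack.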
func AddFixAcceptEncodingMiddleware(stack *middleware.Stack) error {
if _, ok := stack.Finalize.Get("Signing"); !ok {
return nil
}

if err := stack.Finalize.Insert(dropAcceptEncodingHeader, "Signing", middleware.Before); err != nil {
return err
}

if err := stack.Finalize.Insert(setAcceptEncodingHeader, "Signing", middleware.After); err != nil {
return err
}
return nil
}
33 changes: 24 additions & 9 deletions s3/client/sdk.go
@@ -1,23 +1,37 @@
package client

import (
"context"
"net/http"
"strings"

"context"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/smithy-go/middleware"
boshhttp "github.com/cloudfoundry/bosh-utils/httpclient"

s3cli_config "github.com/cloudfoundry/storage-cli/s3/config"
)

func NewAwsS3Client(c *s3cli_config.S3Cli) (*s3.Client, error) {
var apiOptions []func(stack *middleware.Stack) error
if c.IsGoogle() {
// Set up middleware to fix requests to Google - they expect the 'Accept-Encoding' header
// not to be included in the request signature. Not needed for "sign" commands,
// since those only generate pre-signed URLs without making actual HTTP requests.
apiOptions = append(apiOptions, AddFixAcceptEncodingMiddleware)
}
return NewAwsS3ClientWithApiOptions(c, apiOptions)
}

func NewAwsS3ClientWithApiOptions(
c *s3cli_config.S3Cli,
apiOptions []func(stack *middleware.Stack) error,
) (*s3.Client, error) {
var httpClient *http.Client

if c.SSLVerifyPeer {
@@ -30,11 +44,7 @@ func NewAwsS3Client(c *s3cli_config.S3Cli) (*s3.Client, error) {
config.WithHTTPClient(httpClient),
}

if c.UseRegion() {
options = append(options, config.WithRegion(c.Region))
} else {
options = append(options, config.WithRegion(s3cli_config.EmptyRegion))
}
options = append(options, config.WithRegion(c.Region))

if c.CredentialsSource == s3cli_config.StaticCredentialsSource {
options = append(options, config.WithCredentialsProvider(
@@ -57,10 +67,13 @@ func NewAwsS3Client(c *s3cli_config.S3Cli) (*s3.Client, error) {
awsConfig.Credentials = aws.NewCredentialsCache(provider)
}

if c.ShouldDisableRequestChecksumCalculation() {
awsConfig.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
}

s3Client := s3.NewFromConfig(awsConfig, func(o *s3.Options) {
o.UsePathStyle = !c.HostStyle
if c.S3Endpoint() != "" {
endpoint := c.S3Endpoint()
if endpoint := c.S3Endpoint(); endpoint != "" {
// AWS SDK v2 requires full URI with protocol
if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
if c.UseSSL {
@@ -71,6 +84,8 @@ func NewAwsS3Client(c *s3cli_config.S3Cli) (*s3.Client, error) {
}
o.BaseEndpoint = aws.String(endpoint)
}
// Apply custom middlewares if provided
o.APIOptions = append(o.APIOptions, apiOptions...)
})

return s3Client, nil
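
For orientation, constructing a client from a populated config and issuing a plain SDK call might look like the following. This is a minimal sketch: the `storage-cli/s3/client` import path is inferred from the repository layout, the config literal is left empty because its field names are not reproduced here, and the bucket and key names are placeholders.

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"

	"github.com/cloudfoundry/storage-cli/s3/client"
	s3cli_config "github.com/cloudfoundry/storage-cli/s3/config"
)

func main() {
	// In real usage the config is parsed from the JSON file described in the README;
	// the struct literal is left empty here because the exact field names are not
	// shown in this sketch.
	cfg := &s3cli_config.S3Cli{}

	s3Client, err := client.NewAwsS3Client(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Plain AWS SDK v2 call against the configured endpoint; bucket and key are placeholders.
	_, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("example/object"),
		Body:   strings.NewReader("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```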