Merge pull request #122 from SumoLogic/hpal_security_fix
Published zip files and updated test artifacts
himanshu219 authored Sep 11, 2024
2 parents e4f0492 + dd9fe99 commit 0665bdf
Showing 7 changed files with 21 additions and 15 deletions.
6 changes: 3 additions & 3 deletions BlockBlobReader/src/blobreaderzipdeploy.json
@@ -419,7 +419,7 @@
"[variables('BlobReader_resourceId')]"
],
"properties": {
"packageUri": "https://appdev-cloudformation-templates.s3.amazonaws.com/AzureBlobReader/taskproducer4.1.2.zip",
"packageUri": "https://appdev-cloudformation-templates.s3.amazonaws.com/AzureBlobReader/taskproducer4.1.4.zip",
"appOffline": true
}
}
@@ -512,7 +512,7 @@
"[variables('blobreaderconsumer_resourceId')]"
],
"properties": {
"packageUri": "https://appdev-cloudformation-templates.s3.amazonaws.com/AzureBlobReader/taskconsumer4.1.2.zip",
"packageUri": "https://appdev-cloudformation-templates.s3.amazonaws.com/AzureBlobReader/taskconsumer4.1.4.zip",
"appOffline": true
}
}
@@ -609,7 +609,7 @@
"[variables('DLQProcessor_resourceId')]"
],
"properties": {
"packageUri": "https://appdev-cloudformation-templates.s3.amazonaws.com/AzureBlobReader/dlqprocessor4.1.2.zip",
"packageUri": "https://appdev-cloudformation-templates.s3.amazonaws.com/AzureBlobReader/dlqprocessor4.1.4.zip",
"appOffline": true
}
}
5 changes: 3 additions & 2 deletions BlockBlobReader/src/consumer.js
@@ -195,8 +195,9 @@ function getRowKey(metadata) {

async function setAppendBlobOffset(context, serviceBusTask, newOffset) {

let rowKey = "";
try {
- let rowKey = getRowKey(serviceBusTask);
+ rowKey = getRowKey(serviceBusTask);
// Todo: this should be atomic update if other request decreases offset it shouldn't allow
context.log.verbose("Attempting to update offset row: %s from: %d to: %d", rowKey, serviceBusTask.startByte, newOffset);
let entity = {
@@ -208,7 +209,7 @@ async function setAppendBlobOffset(context, serviceBusTask, newOffset) {
containerName: serviceBusTask.containerName,
storageName: serviceBusTask.storageName
}
- var updateResult = await azureTableClient.updateEntity(entity, "Merge");
+ let updateResult = await azureTableClient.updateEntity(entity, "Merge");
context.log.verbose("Updated offset result: %s row: %s from: %d to: %d", JSON.stringify(updateResult), rowKey, serviceBusTask.startByte, newOffset);
} catch (error) {
context.log.error(`Error - Failed to update OffsetMap table, error: ${JSON.stringify(error)}, rowKey: ${rowKey}, newOffset: ${newOffset}`)
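Since the same three-line change recurs verbatim in two more copies below, the scoping fix is worth spelling out: with "let rowKey = ..." declared inside the try block, the binding was block-scoped, so the ${rowKey} interpolation in the catch logger referenced an out-of-scope name and would throw a ReferenceError of its own, masking the original table-update failure. Hoisting the declaration above the try (initialized to an empty string) keeps it visible to the error log. The var-to-let swap on updateResult is a stylistic cleanup with identical behavior in this function.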
2 changes: 1 addition & 1 deletion BlockBlobReader/src/create_zip.sh
@@ -80,7 +80,7 @@ else
fi

echo "creating zip"
version="4.1.2"
version="4.1.4"
producer_zip_file="taskproducer$version.zip"
consumer_zip_file="taskconsumer$version.zip"
dlqprocessor_zip_file="dlqprocessor$version.zip"
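This version variable names the zip artifacts built below it (taskproducer$version.zip and so on), so the bump here is what produces the taskproducer4.1.4.zip, taskconsumer4.1.4.zip, and dlqprocessor4.1.4.zip files that the packageUri values in blobreaderzipdeploy.json above now point at; the build script and the ARM template have to move in lockstep.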
5 changes: 3 additions & 2 deletions (file path not rendered; same change as BlockBlobReader/src/consumer.js above)
@@ -195,8 +195,9 @@ function getRowKey(metadata) {

async function setAppendBlobOffset(context, serviceBusTask, newOffset) {

let rowKey = "";
try {
- let rowKey = getRowKey(serviceBusTask);
+ rowKey = getRowKey(serviceBusTask);
// Todo: this should be atomic update if other request decreases offset it shouldn't allow
context.log.verbose("Attempting to update offset row: %s from: %d to: %d", rowKey, serviceBusTask.startByte, newOffset);
let entity = {
@@ -208,7 +209,7 @@ async function setAppendBlobOffset(context, serviceBusTask, newOffset) {
containerName: serviceBusTask.containerName,
storageName: serviceBusTask.storageName
}
- var updateResult = await azureTableClient.updateEntity(entity, "Merge");
+ let updateResult = await azureTableClient.updateEntity(entity, "Merge");
context.log.verbose("Updated offset result: %s row: %s from: %d to: %d", JSON.stringify(updateResult), rowKey, serviceBusTask.startByte, newOffset);
} catch (error) {
context.log.error(`Error - Failed to update OffsetMap table, error: ${JSON.stringify(error)}, rowKey: ${rowKey}, newOffset: ${newOffset}`)
5 changes: 3 additions & 2 deletions (file path not rendered; same change as BlockBlobReader/src/consumer.js above)
@@ -195,8 +195,9 @@ function getRowKey(metadata) {

async function setAppendBlobOffset(context, serviceBusTask, newOffset) {

let rowKey = "";
try {
- let rowKey = getRowKey(serviceBusTask);
+ rowKey = getRowKey(serviceBusTask);
// Todo: this should be atomic update if other request decreases offset it shouldn't allow
context.log.verbose("Attempting to update offset row: %s from: %d to: %d", rowKey, serviceBusTask.startByte, newOffset);
let entity = {
@@ -208,7 +209,7 @@ async function setAppendBlobOffset(context, serviceBusTask, newOffset) {
containerName: serviceBusTask.containerName,
storageName: serviceBusTask.storageName
}
- var updateResult = await azureTableClient.updateEntity(entity, "Merge");
+ let updateResult = await azureTableClient.updateEntity(entity, "Merge");
context.log.verbose("Updated offset result: %s row: %s from: %d to: %d", JSON.stringify(updateResult), rowKey, serviceBusTask.startByte, newOffset);
} catch (error) {
context.log.error(`Error - Failed to update OffsetMap table, error: ${JSON.stringify(error)}, rowKey: ${rowKey}, newOffset: ${newOffset}`)
2 changes: 1 addition & 1 deletion BlockBlobReader/tests/blob_fixtures.json

Large diffs are not rendered by default.

11 changes: 7 additions & 4 deletions BlockBlobReader/tests/test_blobreader.py
@@ -98,7 +98,7 @@ def get_full_testlog_file_name(self):
if len(self.test_filename) > maxMetadataLength:
expected_filename = self.test_filename[:60] + "..." + self.test_filename[-(60-len(file_ext)):] + file_ext
else:
-             expected_filename = self.test_filename
+             expected_filename = self.test_filename + file_ext
return expected_filename

def test_03_func_logs(self):
@@ -146,7 +146,7 @@ def test_03_func_logs(self):
expected_record_count = {
"blob": 15,
"log": 10,
"json": 120,
"json": 153,
"csv": 12
}
record_count = record_excluded_by_filter_count = record_unsupported_extension_count = None
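The expected json record count rising from 120 to 153 presumably tracks the regenerated blob_fixtures.json above (its diff is too large to render), in line with the commit message's "updated test artifacts".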
@@ -184,7 +184,9 @@ def test_03_func_logs(self):
def upload_message_in_service_bus(self):
file_ext = f".{self.log_type}"
test_filename = self.test_filename + file_ext
-         file_size = os.path.getsize(f"blob_fixtures{file_ext}")
+         with open(f"blob_fixtures{file_ext}", "r") as fp:
+             file_size = len(fp.read())

triggerData = {
"blobName": test_filename,
"containerName": self.test_container_name,
@@ -329,7 +331,8 @@ def insert_mock_json_in_BlobStorage(self):
self.test_container_name, test_filename)
for i, data_block in enumerate(self.get_json_data()):
block_id = self.get_random_name()
-             file_bytes = json.dumps(data_block)
+             # removing spaces (added by json.dumps) using separators
+             file_bytes = json.dumps(data_block, separators=(',', ':'))
file_bytes = (file_bytes[1:-1] if i ==
0 else "," + file_bytes[1:-1]).encode()
self.block_blob_service.put_block(
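One last note on the separators change above: json.dumps with no indent defaults to separators of (', ', ': '), padding commas and colons with spaces, so the serialized blocks would not be byte-identical to the unpadded fixture content. A minimal standard-library sketch (record contents here are illustrative):

```python
import json

record = {"timestamp": 1726000000, "level": "INFO"}

# Default separators (', ', ': ') pad the output with spaces.
print(json.dumps(record))
# -> {"timestamp": 1726000000, "level": "INFO"}

# Compact separators produce the unpadded form, keeping block sizes and
# offsets consistent with the original fixture bytes.
print(json.dumps(record, separators=(',', ':')))
# -> {"timestamp":1726000000,"level":"INFO"}
```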
