public synchronized void createS3BucketIfNotExists(String p_bucket_name) {
    _logger.debug("Searching for bucket " + p_bucket_name);
    if (!s3Client.doesBucketExist(p_bucket_name)) {
        Bucket bucket = s3Client.createBucket(p_bucket_name);
        _logger.info("Created bucket: " + bucket.getName());
    } else {
        _logger.debug("Bucket detected. Verifying permissions.");
        try {
            s3Client.getBucketAcl(p_bucket_name);
        } catch (AmazonClientException ex) {
            _logger.warn("Permission check failed. Randomizing.");
            ConfigFacade.set(Configuration.S3_BUCKET_FORMAT, p_bucket_name + "-" + Security.getRandomHash(8));
            // Log the newly configured bucket name, which is what the retry actually uses.
            String newBucketName = getConfiguredBucketName();
            _logger.debug("Reiterating with: " + newBucketName);
            createS3BucketIfNotExists(newBucketName);
        }
    }
}
private LambdaConfig() {
    try (InputStream is = getClass().getClassLoader().getResourceAsStream("env.properties")) {
        this.props.load(is);
        this.repository = new FileRepository(new File(System.getProperty("java.io.tmpdir"), "s3"));
    } catch (IOException e) {
        throw new IllegalArgumentException(e);
    }
    overwriteWithSystemProperty(ENV_BRANCH);
    overwriteWithSystemProperty(ENV_BUCKET);
    overwriteWithSystemProperty(ENV_GITHUB);
    this.remote = new Remote(Constants.DEFAULT_REMOTE_NAME);
    this.branch = new Branch(props.getProperty(ENV_BRANCH, Constants.MASTER));
    this.authentication = new SecureShellAuthentication(new Bucket(props.getProperty(ENV_BUCKET)), client);
}
@Override
public List<Bucket> listBuckets(ListBucketsRequest listBucketsRequest)
        throws SdkClientException, AmazonServiceException {
    listBucketsRequest = beforeClientExecution(listBucketsRequest);
    rejectNull(listBucketsRequest, "The request object parameter listBucketsRequest must be specified.");
    Request<ListBucketsRequest> request = createRequest(null, null, listBucketsRequest, HttpMethodName.GET);
    // Add IBM Service Instance Id to headers
    if ((null != this.awsCredentialsProvider)
            && (this.awsCredentialsProvider.getCredentials() instanceof IBMOAuthCredentials)) {
        IBMOAuthCredentials oAuthCreds = (IBMOAuthCredentials) this.awsCredentialsProvider.getCredentials();
        if (oAuthCreds.getServiceInstanceId() != null) {
            request.addHeader(Headers.IBM_SERVICE_INSTANCE_ID, oAuthCreds.getServiceInstanceId());
        }
    }
    return invoke(request, new Unmarshallers.ListBucketsUnmarshaller(), null, null);
}
/**
 * Deletes all buckets except the initial ones, aborting any in-flight multipart uploads first.
 */
@After
public void cleanupFilestore() {
    for (final Bucket bucket : s3Client.listBuckets()) {
        if (!INITIAL_BUCKET_NAMES.contains(bucket.getName())) {
            s3Client.listMultipartUploads(new ListMultipartUploadsRequest(bucket.getName()))
                    .getMultipartUploads()
                    .forEach(upload -> s3Client.abortMultipartUpload(
                            new AbortMultipartUploadRequest(bucket.getName(), upload.getKey(), upload.getUploadId())));
            s3Client.deleteBucket(bucket.getName());
        }
    }
}
/**
 * Verify that buckets can be created and listed.
 */
@Test
public void shouldCreateBucketAndListAllBuckets() {
    // The returned creation date might strip off the millisecond part (rounding down);
    // also allow for clock skew of up to a minute in the Docker container.
    final Date creationDate = new Date((System.currentTimeMillis() / 1000) * 1000 - 60000);
    final Bucket bucket = s3Client.createBucket(BUCKET_NAME);
    assertThat(String.format("Bucket name should match '%s'!", BUCKET_NAME), bucket.getName(), equalTo(BUCKET_NAME));
    final List<Bucket> buckets = s3Client.listBuckets().stream()
            .filter(b -> BUCKET_NAME.equals(b.getName()))
            .collect(Collectors.toList());
    assertThat("Expecting one bucket", buckets, hasSize(1));
    final Bucket createdBucket = buckets.get(0);
    assertThat(createdBucket.getCreationDate(), greaterThanOrEqualTo(creationDate));
    final Owner bucketOwner = createdBucket.getOwner();
    assertThat(bucketOwner.getDisplayName(), equalTo("s3-mock-file-store"));
    assertThat(bucketOwner.getId(), equalTo("123"));
}
@Test
public void testThatHandlerCanDeriveS3BucketAndGetConfig() {
    String arn = "arn:aws:lambda:us-west-2:1111111:function:dev-gateway-fas342452-6d86-LambdaWAFBlacklistingFun-1LSORI5GUP95H";
    String bucketName = "dev-cerberusconfigbucket";
    String confJson = "{\n" +
            " \"manual_white_list_ip_set_id\" : \"11111-a3be-41ee-2222-33f708dd939e\",\n" +
            " \"manual_black_list_ip_set_id\" : \"11111-569d-4924-22222-33333\",\n" +
            " \"rate_limit_auto_black_list_ip_set_id\" : \"33333-5195-4b32-44444-71d8fbb9ff4d\",\n" +
            " \"rate_limit_violation_blacklist_period_in_minutes\" : 10,\n" +
            " \"request_per_minute_limit\" : 10\n" +
            "}";
    List<Bucket> bucketList = Lists.newLinkedList();
    bucketList.add(new Bucket(bucketName));
    when(amazonS3Client.listBuckets()).thenReturn(bucketList);
    S3Object object = new S3Object();
    object.setObjectContent(new ByteArrayInputStream(confJson.getBytes()));
    when(amazonS3Client.getObject(any())).thenReturn(object);
    CloudFrontLogHandlerConfig config = handler.getConfiguration(arn);
    assertTrue(config.getRequestPerMinuteLimit() == 10);
}
@Test(expected = RuntimeException.class)
public void testThatHandlerErrorsWhenWeCantFindTheConfigFile() {
    String arn = "arn:aws:lambda:us-west-2:1111111:function:dev-gateway-fas342452-6d86-LambdaWAFBlacklistingFun-1LSORI5GUP95H";
    String bucketName = "dev-cerberusconfigbucket";
    List<Bucket> bucketList = Lists.newLinkedList();
    bucketList.add(new Bucket(bucketName));
    AmazonS3Exception e = new AmazonS3Exception("foo");
    e.setErrorCode("NoSuchKey");
    when(amazonS3Client.getObject(any())).thenThrow(e);
    when(amazonS3Client.listBuckets()).thenReturn(bucketList);
    handler.getConfiguration(arn);
}
public static void main(String[] args) {
    final String USAGE = "\n" +
            "CreateBucket - create an S3 bucket\n\n" +
            "Usage: CreateBucket <bucketname>\n\n" +
            "Where:\n" +
            " bucketname - the name of the bucket to create.\n\n" +
            "The bucket name must be unique, or an error will result.\n";
    if (args.length < 1) {
        System.out.println(USAGE);
        System.exit(1);
    }
    String bucket_name = args[0];
    System.out.format("\nCreating S3 bucket: %s\n", bucket_name);
    Bucket b = createBucket(bucket_name);
    if (b == null) {
        System.out.println("Error creating bucket!\n");
    } else {
        System.out.println("Done!\n");
    }
}
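/*
 * A minimal sketch of the createBucket(String) helper that the main method above calls; the
 * helper is not shown in the original snippet, so this is an assumption, not the author's code.
 * It follows the common AWS SDK for Java v1 pattern (doesBucketExistV2 + createBucket) and
 * assumes the usual com.amazonaws.services.s3 imports. getBucket(...) refers to the lookup
 * helper shown later in this collection.
 */
public static Bucket createBucket(String bucket_name) {
    final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    Bucket b = null;
    if (s3.doesBucketExistV2(bucket_name)) {
        // Reuse the existing bucket instead of failing on a duplicate-create attempt.
        System.out.format("Bucket %s already exists.\n", bucket_name);
        b = getBucket(bucket_name);
    } else {
        try {
            b = s3.createBucket(bucket_name);
        } catch (AmazonS3Exception e) {
            System.err.println(e.getErrorMessage());
        }
    }
    return b;
}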
private Optional<String> findBucket(final String environmentName) {
    AmazonS3Client s3Client = new AmazonS3Client();
    List<Bucket> buckets = s3Client.listBuckets();
    String envBucket = null;
    for (final Bucket bucket : buckets) {
        if (StringUtils.contains(bucket.getName(), ConfigConstants.CONFIG_BUCKET_KEY)) {
            String[] parts = bucket.getName().split("-");
            if (StringUtils.equalsIgnoreCase(environmentName, parts[0])) {
                envBucket = bucket.getName();
                break;
            }
        }
    }
    return Optional.ofNullable(envBucket);
}
public BucketNameValidationDTO validateBucketName(String bucketName) {
    if (!bucketName.startsWith(enhancedSnapshotBucketPrefix002)) {
        return new BucketNameValidationDTO(false, "Bucket name should start with " + enhancedSnapshotBucketPrefix002);
    }
    if (amazonS3.doesBucketExist(bucketName)) {
        // check whether we own this bucket
        List<Bucket> buckets = amazonS3.listBuckets();
        for (Bucket bucket : buckets) {
            if (bucket.getName().equals(bucketName)) {
                return new BucketNameValidationDTO(true, "");
            }
        }
        return new BucketNameValidationDTO(false, "The requested bucket name is not available. Please select a different name.");
    }
    try {
        BucketNameUtils.validateBucketName(bucketName);
        return new BucketNameValidationDTO(true, "");
    } catch (IllegalArgumentException e) {
        return new BucketNameValidationDTO(false, e.getMessage());
    }
}
@Test
public void checkNewlyAddedBuckets() {
    Instant addedTime1 = LocalDateTime.of(2015, 10, 15, 12, 25).toInstant(ZoneOffset.UTC);
    Instant addedTime2 = LocalDateTime.of(2015, 10, 15, 13, 30).toInstant(ZoneOffset.UTC);
    when(clock.instant()).thenReturn(addedTime1, addedTime2);
    s3.createBucket("new_bucket_1");
    s3.createBucket("new_bucket_2");
    List<Bucket> buckets = s3.listBuckets();
    assertThat(buckets).hasSize(2);
    assertThat(getBucketWithName("new_bucket_1", buckets).getCreationDate()).isEqualTo(Date.from(addedTime1));
    assertThat(getBucketWithName("new_bucket_2", buckets).getCreationDate()).isEqualTo(Date.from(addedTime2));
    buckets.forEach(bucket -> {
        Owner owner = bucket.getOwner();
        assertThat(owner.getDisplayName()).isEqualTo("test");
        assertThat(owner.getId()).isNotEmpty();
    });
}
private static void createBucket(TransferManager tm, String bucketName) throws AmazonClientException {
    boolean existsBucket = false;
    for (Bucket bucket : tm.getAmazonS3Client().listBuckets()) {
        if (bucket.getName().equals(bucketName)) {
            existsBucket = true;
        }
    }
    if (!existsBucket) {
        LOGGER.info("Creating Amazon S3 bucket " + bucketName);
        try {
            tm.getAmazonS3Client().createBucket(bucketName);
        } catch (AmazonClientException e) {
            LOGGER.error("Amazon S3 bucket creation for bucket " + bucketName + " failed");
            throw new AmazonClientException(e.getMessage());
        }
        LOGGER.info("Amazon S3 bucket creation for bucket " + bucketName + " completed successfully");
    }
}
private AmazonS3 prepareMockForTestWildcardInBucketName() {
    AmazonS3 amazonS3 = mock(AmazonS3.class);
    when(amazonS3.listBuckets()).thenReturn(Arrays.asList(new Bucket("myBucketOne"), new Bucket("myBucketTwo"),
            new Bucket("anotherBucket"), new Bucket("myBuckez")));
    // Mocks for the '**' case
    ObjectListing objectListingWithOneFile = createObjectListingMock(
            Collections.singletonList(createS3ObjectSummaryWithKey("test.txt")), Collections.emptyList(), false);
    ObjectListing emptyObjectListing = createObjectListingMock(Collections.emptyList(), Collections.emptyList(), false);
    when(amazonS3.listObjects(argThat(new ListObjectsRequestMatcher("myBucketOne", null, null)))).thenReturn(objectListingWithOneFile);
    when(amazonS3.listObjects(argThat(new ListObjectsRequestMatcher("myBucketTwo", null, null)))).thenReturn(emptyObjectListing);
    when(amazonS3.listObjects(argThat(new ListObjectsRequestMatcher("anotherBucket", null, null)))).thenReturn(objectListingWithOneFile);
    when(amazonS3.listObjects(argThat(new ListObjectsRequestMatcher("myBuckez", null, null)))).thenReturn(emptyObjectListing);
    when(amazonS3.getObjectMetadata(any(GetObjectMetadataRequest.class))).thenReturn(new ObjectMetadata());
    return amazonS3;
}
public Bucket createBucketForInstance(String instanceId, ServiceDefinition service, String planId,
        String organizationGuid, String spaceGuid) {
    String bucketName = getBucketNameForInstance(instanceId);
    logger.info("Creating bucket '{}' for serviceInstanceId '{}'", bucketName, instanceId);
    Bucket bucket = s3.createBucket(bucketName, Region.fromValue(region));

    // TODO allow for additional, custom tagging options
    BucketTaggingConfiguration bucketTaggingConfiguration = new BucketTaggingConfiguration();
    TagSet tagSet = new TagSet();
    tagSet.setTag("serviceInstanceId", instanceId);
    tagSet.setTag("serviceDefinitionId", service.getId());
    tagSet.setTag("planId", planId);
    tagSet.setTag("organizationGuid", organizationGuid);
    tagSet.setTag("spaceGuid", spaceGuid);
    bucketTaggingConfiguration.withTagSets(tagSet);
    s3.setBucketTaggingConfiguration(bucket.getName(), bucketTaggingConfiguration);
    return bucket;
}
@Test
public void listBucketsShouldReturnExpectedBuckets() throws Exception {
    removeDefaultBucket();
    client.createBucket("bucket-1");
    client.createBucket("bucket-2");
    client.createBucket("bucket-3");
    client.createBucket("bucket-4");
    client.createBucket("bucket-5");
    List<com.amazonaws.services.s3.model.Bucket> buckets = client.listBuckets();
    List<String> bucketNames = new ArrayList<>();
    for (Bucket bucket : buckets) {
        bucketNames.add(bucket.getName());
    }
    assertEquals(5, bucketNames.size());
    assertTrue(bucketNames.contains("bucket-1"));
    assertTrue(bucketNames.contains("bucket-2"));
    assertTrue(bucketNames.contains("bucket-3"));
    assertTrue(bucketNames.contains("bucket-4"));
    assertTrue(bucketNames.contains("bucket-5"));
}
@Override
public List<Bucket> listBuckets() throws AmazonClientException, AmazonServiceException {
    ArrayList<Bucket> list = new ArrayList<Bucket>();
    Bucket bucket = new Bucket("camel-bucket");
    bucket.setOwner(new Owner("Camel", "camel"));
    bucket.setCreationDate(new Date());
    list.add(bucket);
    return list;
}
@Override
public Bucket createBucket(CreateBucketRequest createBucketRequest) throws AmazonClientException, AmazonServiceException {
    if ("nonExistingBucket".equals(createBucketRequest.getBucketName())) {
        nonExistingBucketCreated = true;
    }
    Bucket bucket = new Bucket();
    bucket.setName(createBucketRequest.getBucketName());
    bucket.setCreationDate(new Date());
    bucket.setOwner(new Owner("c2efc7302b9011ba9a78a92ac5fd1cd47b61790499ab5ddf5a37c31f0638a8fc ", "Christian Mueller"));
    return bucket;
}
public SecureShellAuthentication(Bucket bucket, AmazonS3 client) {
    factory = new JschConfigSessionFactory() {

        @Override
        public synchronized RemoteSession getSession(URIish uri, CredentialsProvider credentialsProvider, FS fs, int tms)
                throws TransportException {
            // Do not check for default ssh user config
            fs.setUserHome(null);
            return super.getSession(uri, credentialsProvider, fs, tms);
        }

        @Override
        protected void configure(OpenSshConfig.Host host, Session session) {
            session.setConfig("HashKnownHosts", "no");
            if ("localhost".equalsIgnoreCase(host.getHostName())) {
                session.setConfig("StrictHostKeyChecking", "no");
            }
        }

        @Override
        protected void configureJSch(JSch jsch) {
            S3Object file;
            file = client.getObject(bucket.getName(), ".ssh/known_hosts");
            try (InputStream is = file.getObjectContent()) {
                jsch.setKnownHosts(is);
            } catch (IOException | JSchException e) {
                throw new IllegalArgumentException("Missing known hosts file on s3: .ssh/known_hosts", e);
            }
            file = client.getObject(bucket.getName(), ".ssh/id_rsa");
            try (InputStream is = file.getObjectContent()) {
                jsch.addIdentity("git", IOUtils.toByteArray(is), null, new byte[0]);
            } catch (IOException | JSchException e) {
                throw new IllegalArgumentException("Missing key file on s3: .ssh/id_rsa", e);
            }
        }
    };
}
public RepositoryS3(Bucket bucket, Repository repository, AmazonS3 s3, Branch branch) {
    this.s3 = s3;
    this.bucket = bucket;
    this.repository = repository;
    this.branch = branch;
    this.uri = new URIish().setScheme("amazon-s3").setHost(bucket.getName()).setPath(Constants.DOT_GIT);
}
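/*
 * A hedged wiring sketch (not from the original source): one way the pieces above might be
 * combined into a RepositoryS3. The client construction, the bucket name, and the temp-dir
 * layout are assumptions for illustration only; the constructors used match the snippets above.
 */
static RepositoryS3 buildRepositoryS3() throws IOException {
    AmazonS3 client = AmazonS3ClientBuilder.defaultClient();
    Bucket bucket = new Bucket("my-git-mirror-bucket");  // hypothetical bucket name
    Repository repository = new FileRepository(new File(System.getProperty("java.io.tmpdir"), "s3"));
    Branch branch = new Branch(Constants.MASTER);
    return new RepositoryS3(bucket, repository, client, branch);
}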
/**
 * Verifies that default Buckets got created after S3 Mock was bootstrapped.
 */
@Test
public void defaultBucketsGotCreated() {
    final List<Bucket> buckets = s3Client.listBuckets();
    final Set<String> bucketNames = buckets.stream()
            .map(Bucket::getName)
            .filter(INITIAL_BUCKET_NAMES::contains)
            .collect(Collectors.toSet());
    assertThat("Not all default Buckets got created", bucketNames,
            is(equalTo(new HashSet<>(INITIAL_BUCKET_NAMES))));
}
/**
 * Verifies multipart copy.
 *
 * @throws InterruptedException
 */
@Test
public void multipartCopy() throws InterruptedException, IOException, NoSuchAlgorithmException {
    final int contentLen = 3 * _1MB;
    final ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(contentLen);
    final String assumedSourceKey = UUID.randomUUID().toString();
    final Bucket sourceBucket = s3Client.createBucket(UUID.randomUUID().toString());
    final Bucket targetBucket = s3Client.createBucket(UUID.randomUUID().toString());
    final TransferManager transferManager = createTransferManager(_2MB, _1MB, _2MB, _1MB);
    final InputStream sourceInputStream = randomInputStream(contentLen);
    final Upload upload = transferManager
            .upload(sourceBucket.getName(), assumedSourceKey, sourceInputStream, objectMetadata);
    final UploadResult uploadResult = upload.waitForUploadResult();
    assertThat(uploadResult.getKey(), is(assumedSourceKey));
    final String assumedDestinationKey = UUID.randomUUID().toString();
    final Copy copy = transferManager.copy(sourceBucket.getName(), assumedSourceKey,
            targetBucket.getName(), assumedDestinationKey);
    final CopyResult copyResult = copy.waitForCopyResult();
    assertThat(copyResult.getDestinationKey(), is(assumedDestinationKey));
    final S3Object copiedObject = s3Client.getObject(targetBucket.getName(), assumedDestinationKey);
    assertThat("Hashes for source and target S3Object do not match.",
            HashUtil.getDigest(copiedObject.getObjectContent()) + "-1", is(uploadResult.getETag()));
}
public static String autoDiscoverBillsBucketName(AmazonS3 s3Client, String awsAccountId) {
    String billFilePrefix = awsAccountId + AWSCsvBillParser.AWS_DETAILED_BILL_CSV_FILE_NAME_MID;
    for (Bucket bucket : s3Client.listBuckets()) {
        // For each bucket accessible to this client, try to search for files with the 'billFilePrefix'
        ObjectListing objectListing = s3Client.listObjects(bucket.getName(), billFilePrefix);
        if (!objectListing.getObjectSummaries().isEmpty()) {
            // This means that this bucket contains zip files representing the detailed csv bills.
            return bucket.getName();
        }
    }
    return null;
}
/**
 * Call the listBuckets() method to enumerate S3 buckets.
 * The AWS SDK does not have an async method for listing buckets, so we use the synchronous method
 * in a fixed thread pool for the S3 enumeration service.
 * If the listBuckets() call fails due to an unsupported region, we mark the S3 client invalid,
 * stop the enumeration flow and patch back to the parent.
 */
private void enumerateS3Buckets(S3StorageEnumerationContext aws) {
    logInfo(() -> String.format("Running creation enumeration in refresh mode for %s",
            aws.request.original.resourceReference));
    OperationContext operationContext = OperationContext.getOperationContext();
    this.executorService.submit(new Runnable() {
        @Override
        public void run() {
            try {
                List<Bucket> bucketList = aws.amazonS3Client.listBuckets();
                for (Bucket bucket : bucketList) {
                    aws.remoteBucketsByBucketName.put(bucket.getName(), bucket);
                }
                OperationContext.restoreOperationContext(operationContext);
                if (aws.remoteBucketsByBucketName.isEmpty()) {
                    aws.subStage = S3StorageEnumerationSubStage.DELETE_DISKS;
                }
                handleReceivedEnumerationData(aws);
            } catch (Exception e) {
                if (e instanceof AmazonS3Exception
                        && ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_FORBIDDEN) {
                    markClientInvalid(aws);
                } else {
                    logSevere("Exception enumerating S3 buckets for [ex=%s]", e.getMessage());
                    aws.error = e;
                    aws.stage = S3StorageEnumerationStages.ERROR;
                    handleEnumerationRequest(aws);
                }
            }
        }
    });
}
/**
 * Map an S3 bucket to a photon-model disk state.
 */
private DiskState mapBucketToDiskState(Bucket bucket, S3StorageEnumerationContext aws) {
    DiskState diskState = new DiskState();
    diskState.id = bucket.getName();
    diskState.name = bucket.getName();
    diskState.storageType = STORAGE_TYPE_S3;
    diskState.regionId = aws.regionsByBucketName.get(bucket.getName());
    diskState.authCredentialsLink = aws.endpointAuth.documentSelfLink;
    diskState.resourcePoolLink = aws.request.original.resourcePoolLink;
    diskState.endpointLink = aws.request.original.endpointLink;
    if (diskState.endpointLinks == null) {
        diskState.endpointLinks = new HashSet<>();
    }
    diskState.endpointLinks.add(aws.request.original.endpointLink);
    diskState.tenantLinks = aws.parentCompute.tenantLinks;
    diskState.computeHostLink = aws.parentCompute.documentSelfLink;
    diskState.tagLinks = new HashSet<>();
    if (bucket.getCreationDate() != null) {
        diskState.creationTimeMicros = TimeUnit.MILLISECONDS.toMicros(bucket.getCreationDate().getTime());
    }
    if (bucket.getOwner() != null && bucket.getOwner().getDisplayName() != null) {
        diskState.customProperties = new HashMap<>();
        diskState.customProperties.put(BUCKET_OWNER_NAME, bucket.getOwner().getDisplayName());
    }
    // Set internal type tag for all S3 disk states only if POST for the TagState was successful.
    if (aws.internalTypeTagSelfLink != null) {
        diskState.tagLinks.add(aws.internalTypeTagSelfLink);
    }
    return diskState;
}
@Test
public void cleanUpAWSS3() {
    if (this.isMock) {
        return;
    }
    List<Bucket> buckets = this.s3Clients.get(Regions.DEFAULT_REGION.getName()).listBuckets();
    for (Bucket bucket : buckets) {
        long bucketCreationTimeMicros = TimeUnit.MILLISECONDS.toMicros(bucket.getCreationDate().getTime());
        long timeDifference = Utils.getNowMicrosUtc() - bucketCreationTimeMicros;
        if (bucket.getName().contains("enumtest-bucket")
                && timeDifference > TimeUnit.HOURS.toMicros(1)
                && !bucket.getName().contains("enumtest-bucket-do-not-delete")) {
            for (AmazonS3Client s3Client : this.s3Clients.values()) {
                try {
                    s3Client.deleteBucket(bucket.getName());
                    this.host.log(Level.INFO, "Deleting stale bucket %s", bucket.getName());
                } catch (Exception e) {
                    continue;
                }
            }
        }
    }
}
@Override
protected void doPost(@NotNull final HttpServletRequest request,
                      @NotNull final HttpServletResponse response,
                      @NotNull final Element xmlResponse) {
    final ActionErrors errors = new ActionErrors();
    final Map<String, String> parameters = getProperties(request);
    try {
        xmlResponse.addContent((Content) S3Util.withS3Client(parameters, s3Client -> {
            final Element bucketsElement = new Element("buckets");
            for (Bucket bucket : s3Client.listBuckets()) {
                final Element bucketElement = new Element("bucket");
                final String bucketName = bucket.getName();
                final String location = s3Client.getBucketLocation(bucketName);
                final String regionName = getRegionName(location);
                bucketElement.setAttribute("location", regionName);
                bucketElement.setText(bucketName);
                bucketsElement.addContent(bucketElement);
            }
            return bucketsElement;
        }));
    } catch (Throwable e) {
        final String message = String.format("Failed to get list of buckets: %s", e.getMessage());
        LOG.infoAndDebugDetails(message, e);
        errors.addError("buckets", message);
    }
    if (errors.hasErrors()) {
        errors.serialize(xmlResponse);
    }
}
protected String getConfigBucketName(String environmentName) {
    List<Bucket> buckets = amazonS3Client.listBuckets();
    for (final Bucket bucket : buckets) {
        if (StringUtils.contains(bucket.getName(), CERBERUS_CONFIG_BUCKET)) {
            String[] parts = bucket.getName().split("-");
            if (StringUtils.equalsIgnoreCase(environmentName, parts[0])) {
                return bucket.getName();
            }
        }
    }
    throw new RuntimeException("Failed to determine the config s3 bucket");
}
@Test(expected = RuntimeException.class)
public void testThatHandlerErrorsWhenWeCantFindBucket() {
    String arn = "arn:aws:lambda:us-west-2:1111111:function:dev-gateway-fas342452-6d86-LambdaWAFBlacklistingFun-1LSORI5GUP95H";
    String bucketName = "prod-cerberusconfigbucket";
    String bucketName2 = "test-foo";
    List<Bucket> bucketList = Lists.newLinkedList();
    bucketList.add(new Bucket(bucketName));
    bucketList.add(new Bucket(bucketName2));
    when(amazonS3Client.listBuckets()).thenReturn(bucketList);
    handler.getConfiguration(arn);
}
@Test(expected = RuntimeException.class)
public void testThatHandlerErrorsWhenWeCantFindTheConfigFile2() {
    String arn = "arn:aws:lambda:us-west-2:1111111:function:dev-gateway-fas342452-6d86-LambdaWAFBlacklistingFun-1LSORI5GUP95H";
    String bucketName = "dev-cerberusconfigbucket";
    List<Bucket> bucketList = Lists.newLinkedList();
    bucketList.add(new Bucket(bucketName));
    when(amazonS3Client.getObject(any())).thenReturn(null);
    when(amazonS3Client.listBuckets()).thenReturn(bucketList);
    handler.getConfiguration(arn);
}
public static Bucket getBucket(String bucket_name) {
    final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    Bucket named_bucket = null;
    List<Bucket> buckets = s3.listBuckets();
    for (Bucket b : buckets) {
        if (b.getName().equals(bucket_name)) {
            named_bucket = b;
        }
    }
    return named_bucket;
}
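/*
 * Usage sketch (hypothetical, not part of the original source): looks up a bucket by name via
 * getBucket(...) above and reports whether it exists. The bucket name is an example value.
 */
public static void main(String[] args) {
    Bucket b = getBucket("my-example-bucket");
    if (b == null) {
        System.out.println("Bucket not found.");
    } else {
        System.out.format("Found bucket %s, created %s%n", b.getName(), b.getCreationDate());
    }
}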