comparison venv/lib/python2.7/site-packages/boto/s3/bucket.py @ 0:d67268158946 draft

planemo upload commit a3f181f5f126803c654b3a66dd4e83a48f7e203b
author bcclaywell
date Mon, 12 Oct 2015 17:43:33 -0400
1 # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
2 # Copyright (c) 2010, Eucalyptus Systems, Inc.
3 # All rights reserved.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining a
6 # copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish, dis-
9 # tribute, sublicense, and/or sell copies of the Software, and to permit
10 # persons to whom the Software is furnished to do so, subject to the fol-
11 # lowing conditions:
12 #
13 # The above copyright notice and this permission notice shall be included
14 # in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
18 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
19 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 # IN THE SOFTWARE.
23
24 import boto
25 from boto import handler
26 from boto.resultset import ResultSet
27 from boto.exception import BotoClientError
28 from boto.s3.acl import Policy, CannedACLStrings, Grant
29 from boto.s3.key import Key
30 from boto.s3.prefix import Prefix
31 from boto.s3.deletemarker import DeleteMarker
32 from boto.s3.multipart import MultiPartUpload
33 from boto.s3.multipart import CompleteMultiPartUpload
34 from boto.s3.multidelete import MultiDeleteResult
35 from boto.s3.multidelete import Error
36 from boto.s3.bucketlistresultset import BucketListResultSet
37 from boto.s3.bucketlistresultset import VersionedBucketListResultSet
38 from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
39 from boto.s3.lifecycle import Lifecycle
40 from boto.s3.tagging import Tags
41 from boto.s3.cors import CORSConfiguration
42 from boto.s3.bucketlogging import BucketLogging
43 from boto.s3 import website
44 import boto.jsonresponse
45 import boto.utils
46 import xml.sax
47 import xml.sax.saxutils
48 import re
49 import base64
50 from collections import defaultdict
51 from boto.compat import BytesIO, six, StringIO, urllib
52
53 # as per http://goo.gl/BDuud (02/19/2011)
54
55
56 class S3WebsiteEndpointTranslate(object):
57
58 trans_region = defaultdict(lambda: 's3-website-us-east-1')
59 trans_region['eu-west-1'] = 's3-website-eu-west-1'
60 trans_region['us-west-1'] = 's3-website-us-west-1'
61 trans_region['us-west-2'] = 's3-website-us-west-2'
62 trans_region['sa-east-1'] = 's3-website-sa-east-1'
63 trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
64 trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
65 trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2'
66 trans_region['cn-north-1'] = 's3-website.cn-north-1'
67
68 @classmethod
69 def translate_region(cls, reg):
70 return cls.trans_region[reg]
71
72 S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
73
74
75 class Bucket(object):
76
77 LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
78
79 BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
80 <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
81 <Payer>%s</Payer>
82 </RequestPaymentConfiguration>"""
83
84 VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
85 <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
86 <Status>%s</Status>
87 <MfaDelete>%s</MfaDelete>
88 </VersioningConfiguration>"""
89
90 VersionRE = '<Status>([A-Za-z]+)</Status>'
91 MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
92
93 def __init__(self, connection=None, name=None, key_class=Key):
94 self.name = name
95 self.connection = connection
96 self.key_class = key_class
97
98 def __repr__(self):
99 return '<Bucket: %s>' % self.name
100
101 def __iter__(self):
102 return iter(BucketListResultSet(self))
103
104 def __contains__(self, key_name):
105 return self.get_key(key_name) is not None
106
107 def startElement(self, name, attrs, connection):
108 return None
109
110 def endElement(self, name, value, connection):
111 if name == 'Name':
112 self.name = value
113 elif name == 'CreationDate':
114 self.creation_date = value
115 else:
116 setattr(self, name, value)
117
118 def set_key_class(self, key_class):
119 """
120 Set the Key class associated with this bucket. By default this
121 is the boto.s3.key.Key class, but if you want to subclass that
122 for some reason, this allows you to associate your new class with
123 the bucket so that when you call bucket.new_key() or get a listing
124 of keys in the bucket you will get instances of your key class
125 rather than the default.
126
127 :type key_class: class
128 :param key_class: A subclass of Key that can be more specific
129 """
130 self.key_class = key_class
131
132 def lookup(self, key_name, headers=None):
133 """
134 Deprecated: Please use get_key method.
135
136 :type key_name: string
137 :param key_name: The name of the key to retrieve
138
139 :rtype: :class:`boto.s3.key.Key`
140 :returns: A Key object from this bucket.
141 """
142 return self.get_key(key_name, headers=headers)
143
144 def get_key(self, key_name, headers=None, version_id=None,
145 response_headers=None, validate=True):
146 """
147 Check to see if a particular key exists within the bucket. This
148 method uses a HEAD request to check for the existence of the key.
149 Returns: An instance of a Key object or None
150
151 :param key_name: The name of the key to retrieve
152 :type key_name: string
153
154 :param headers: The headers to send when retrieving the key
155 :type headers: dict
156
157 :param version_id: The version ID of the key to retrieve
158 :type version_id: string
159
160 :param response_headers: A dictionary containing HTTP
161 headers/values that will override any headers associated
162 with the stored object in the response. See
163 http://goo.gl/EWOPb for details.
164 :type response_headers: dict
165
166 :param validate: Verifies whether the key exists. If ``False``,
167 no request is made to the service; an in-memory Key object is
168 constructed instead. Default is ``True``.
169 :type validate: bool
170
171 :rtype: :class:`boto.s3.key.Key`
172 :returns: A Key object from this bucket.
173 """
174 if validate is False:
175 if headers or version_id or response_headers:
176 raise BotoClientError(
177 "When providing 'validate=False', no other params " + \
178 "are allowed."
179 )
180
181 # This leans on the default behavior of ``new_key`` (not hitting
182 # the service). If that changes, that behavior should migrate here.
183 return self.new_key(key_name)
184
185 query_args_l = []
186 if version_id:
187 query_args_l.append('versionId=%s' % version_id)
188 if response_headers:
189 for rk, rv in six.iteritems(response_headers):
190 query_args_l.append('%s=%s' % (rk, urllib.parse.quote(rv)))
191
192 key, resp = self._get_key_internal(key_name, headers, query_args_l)
193 return key
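# Example (illustrative sketch, not part of the original module; the
# bucket and key names below are hypothetical):
#
#     import boto
#     conn = boto.connect_s3()
#     bucket = conn.get_bucket('mybucket')
#     key = bucket.get_key('docs/readme.txt')          # HEAD request to S3
#     if key is not None:
#         print(key.size)
#     stub = bucket.get_key('docs/readme.txt', validate=False)  # no request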
194
195 def _get_key_internal(self, key_name, headers, query_args_l):
196 query_args = '&'.join(query_args_l) or None
197 response = self.connection.make_request('HEAD', self.name, key_name,
198 headers=headers,
199 query_args=query_args)
200 response.read()
201 # Allow any success status (2xx) - for example this lets us
202 # support Range gets, which return status 206:
203 if response.status // 100 == 2:
204 k = self.key_class(self)
205 provider = self.connection.provider
206 k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
207 for field in Key.base_fields:
208 k.__dict__[field.lower().replace('-', '_')] = \
209 response.getheader(field)
210 # the following machinations are a workaround to the fact that
211 # apache/fastcgi omits the content-length header on HEAD
212 # requests when the content-length is zero.
213 # See http://goo.gl/0Tdax for more details.
214 clen = response.getheader('content-length')
215 if clen:
216 k.size = int(response.getheader('content-length'))
217 else:
218 k.size = 0
219 k.name = key_name
220 k.handle_version_headers(response)
221 k.handle_encryption_headers(response)
222 k.handle_restore_headers(response)
223 k.handle_addl_headers(response.getheaders())
224 return k, response
225 else:
226 if response.status == 404:
227 return None, response
228 else:
229 raise self.connection.provider.storage_response_error(
230 response.status, response.reason, '')
231
232 def list(self, prefix='', delimiter='', marker='', headers=None,
233 encoding_type=None):
234 """
235 List key objects within a bucket. This returns an instance of a
236 BucketListResultSet that automatically handles all of the result
237 paging, etc. from S3. You just need to keep iterating until
238 there are no more results.
239
240 Called with no arguments, this will return an iterator object across
241 all keys within the bucket.
242
243 The Key objects returned by the iterator are obtained by parsing
244 the results of a GET on the bucket, also known as the List Objects
245 request. The XML returned by this request contains only a subset
246 of the information about each key. Certain metadata fields such
247 as Content-Type and user metadata are not available in the XML.
248 Therefore, if you want these additional metadata fields you will
249 have to do a HEAD request on the Key in the bucket.
250
251 :type prefix: string
252 :param prefix: allows you to limit the listing to a particular
253 prefix. For example, if you call the method with
254 prefix='/foo/' then the iterator will only cycle through
255 the keys that begin with the string '/foo/'.
256
257 :type delimiter: string
258 :param delimiter: can be used in conjunction with the prefix
259 to allow you to organize and browse your keys
260 hierarchically. See http://goo.gl/Xx63h for more details.
261
262 :type marker: string
263 :param marker: The "marker" of where you are in the result set
264
265 :param encoding_type: Requests Amazon S3 to encode the response and
266 specifies the encoding method to use.
267
268 An object key can contain any Unicode character; however, an XML
269 1.0 parser cannot parse some characters, such as characters with an
270 ASCII value from 0 to 10. For characters that are not supported in
271 XML 1.0, you can add this parameter to request that Amazon S3
272 encode the keys in the response.
273
274 Valid options: ``url``
275 :type encoding_type: string
276
277 :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
278 :return: an instance of a BucketListResultSet that handles paging, etc
279 """
280 return BucketListResultSet(self, prefix, delimiter, marker, headers,
281 encoding_type=encoding_type)
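# Example (illustrative; assumes a Bucket instance ``bucket`` as in the
# get_key example above, and a hypothetical prefix):
#
#     for key in bucket.list(prefix='photos/2015/', delimiter='/'):
#         print(key.name)   # Key objects, plus Prefix objects for "subdirs"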
282
283 def list_versions(self, prefix='', delimiter='', key_marker='',
284 version_id_marker='', headers=None, encoding_type=None):
285 """
286 List version objects within a bucket. This returns an
287 instance of a VersionedBucketListResultSet that automatically
288 handles all of the result paging, etc. from S3. You just need
289 to keep iterating until there are no more results. Called
290 with no arguments, this will return an iterator object across
291 all keys within the bucket.
292
293 :type prefix: string
294 :param prefix: allows you to limit the listing to a particular
295 prefix. For example, if you call the method with
296 prefix='/foo/' then the iterator will only cycle through
297 the keys that begin with the string '/foo/'.
298
299 :type delimiter: string
300 :param delimiter: can be used in conjunction with the prefix
301 to allow you to organize and browse your keys
302 hierarchically. See:
303
304 http://aws.amazon.com/releasenotes/Amazon-S3/213
305
306 for more details.
307
308 :type key_marker: string
309 :param key_marker: The "marker" of where you are in the result set
310
311 :param encoding_type: Requests Amazon S3 to encode the response and
312 specifies the encoding method to use.
313
314 An object key can contain any Unicode character; however, an XML
315 1.0 parser cannot parse some characters, such as characters with an
316 ASCII value from 0 to 10. For characters that are not supported in
317 XML 1.0, you can add this parameter to request that Amazon S3
318 encode the keys in the response.
319
320 Valid options: ``url``
321 :type encoding_type: string
322
323 :rtype: :class:`boto.s3.bucketlistresultset.VersionedBucketListResultSet`
324 :return: an instance of a VersionedBucketListResultSet that handles paging, etc
325 """
326 return VersionedBucketListResultSet(self, prefix, delimiter,
327 key_marker, version_id_marker,
328 headers,
329 encoding_type=encoding_type)
330
331 def list_multipart_uploads(self, key_marker='',
332 upload_id_marker='',
333 headers=None, encoding_type=None):
334 """
335 List multipart upload objects within a bucket. This returns an
336 instance of a MultiPartUploadListResultSet that automatically
337 handles all of the result paging, etc. from S3. You just need
338 to keep iterating until there are no more results.
339
340 :type key_marker: string
341 :param key_marker: The "marker" of where you are in the result set
342
343 :type upload_id_marker: string
344 :param upload_id_marker: The upload id "marker" of where you are in the result set
345
346 :param encoding_type: Requests Amazon S3 to encode the response and
347 specifies the encoding method to use.
348
349 An object key can contain any Unicode character; however, an XML
350 1.0 parser cannot parse some characters, such as characters with an
351 ASCII value from 0 to 10. For characters that are not supported in
352 XML 1.0, you can add this parameter to request that Amazon S3
353 encode the keys in the response.
354
355 Valid options: ``url``
356 :type encoding_type: string
357
358 :rtype: :class:`boto.s3.bucketlistresultset.MultiPartUploadListResultSet`
359 :return: an instance of a MultiPartUploadListResultSet that handles paging, etc
360 """
361 return MultiPartUploadListResultSet(self, key_marker,
362 upload_id_marker,
363 headers,
364 encoding_type=encoding_type)
365
366 def _get_all_query_args(self, params, initial_query_string=''):
367 pairs = []
368
369 if initial_query_string:
370 pairs.append(initial_query_string)
371
372 for key, value in sorted(params.items(), key=lambda x: x[0]):
373 if value is None:
374 continue
375 key = key.replace('_', '-')
376 if key == 'maxkeys':
377 key = 'max-keys'
378 if not isinstance(value, six.string_types + (six.binary_type,)):
379 value = six.text_type(value)
380 if not isinstance(value, six.binary_type):
381 value = value.encode('utf-8')
382 if value:
383 pairs.append(u'%s=%s' % (
384 urllib.parse.quote(key),
385 urllib.parse.quote(value)
386 ))
387
388 return '&'.join(pairs)
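# For instance, with these hypothetical arguments the helper above would
# yield (keys are sorted and underscores become hyphens):
#
#     bucket._get_all_query_args({'max_keys': 100, 'prefix': 'foo'}, 'versions')
#     # -> 'versions&max-keys=100&prefix=foo'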
389
390 def _get_all(self, element_map, initial_query_string='',
391 headers=None, **params):
392 query_args = self._get_all_query_args(
393 params,
394 initial_query_string=initial_query_string
395 )
396 response = self.connection.make_request('GET', self.name,
397 headers=headers,
398 query_args=query_args)
399 body = response.read()
400 boto.log.debug(body)
401 if response.status == 200:
402 rs = ResultSet(element_map)
403 h = handler.XmlHandler(rs, self)
404 if not isinstance(body, bytes):
405 body = body.encode('utf-8')
406 xml.sax.parseString(body, h)
407 return rs
408 else:
409 raise self.connection.provider.storage_response_error(
410 response.status, response.reason, body)
411
412 def validate_kwarg_names(self, kwargs, names):
413 """
414 Checks that all named arguments are in the specified list of names.
415
416 :type kwargs: dict
417 :param kwargs: Dictionary of kwargs to validate.
418
419 :type names: list
420 :param names: List of possible named arguments.
421 """
422 for kwarg in kwargs:
423 if kwarg not in names:
424 raise TypeError('Invalid argument "%s"!' % kwarg)
425
426 def get_all_keys(self, headers=None, **params):
427 """
428 A lower-level method for listing contents of a bucket. This
429 closely models the actual S3 API and requires you to manually
430 handle the paging of results. For a higher-level method that
431 handles the details of paging for you, you can use the list
432 method.
433
434 :type max_keys: int
435 :param max_keys: The maximum number of keys to retrieve
436
437 :type prefix: string
438 :param prefix: The prefix of the keys you want to retrieve
439
440 :type marker: string
441 :param marker: The "marker" of where you are in the result set
442
443 :type delimiter: string
444 :param delimiter: If this optional, Unicode string parameter
445 is included with your request, then keys that contain the
446 same string between the prefix and the first occurrence of
447 the delimiter will be rolled up into a single result
448 element in the CommonPrefixes collection. These rolled-up
449 keys are not returned elsewhere in the response.
450
451 :param encoding_type: Requests Amazon S3 to encode the response and
452 specifies the encoding method to use.
453
454 An object key can contain any Unicode character; however, an XML
455 1.0 parser cannot parse some characters, such as characters with an
456 ASCII value from 0 to 10. For characters that are not supported in
457 XML 1.0, you can add this parameter to request that Amazon S3
458 encode the keys in the response.
459
460 Valid options: ``url``
461 :type encoding_type: string
462
463 :rtype: ResultSet
464 :return: The result from S3 listing the keys requested
465
466 """
467 self.validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix',
468 'marker', 'delimiter',
469 'encoding_type'])
470 return self._get_all([('Contents', self.key_class),
471 ('CommonPrefixes', Prefix)],
472 '', headers, **params)
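# Example of manual paging (illustrative; mirrors the loop that
# boto.s3.bucketlistresultset performs for you via list()):
#
#     rs = bucket.get_all_keys(max_keys=1000)
#     while True:
#         for key in rs:
#             print(key.name)
#         if not rs.is_truncated:
#             break
#         rs = bucket.get_all_keys(max_keys=1000,
#                                  marker=rs.next_marker or rs[-1].name)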
473
474 def get_all_versions(self, headers=None, **params):
475 """
476 A lower-level, version-aware method for listing contents of a
477 bucket. This closely models the actual S3 API and requires
478 you to manually handle the paging of results. For a
479 higher-level method that handles the details of paging for
480 you, you can use the list method.
481
482 :type max_keys: int
483 :param max_keys: The maximum number of keys to retrieve
484
485 :type prefix: string
486 :param prefix: The prefix of the keys you want to retrieve
487
488 :type key_marker: string
489 :param key_marker: The "marker" of where you are in the result set
490 with respect to keys.
491
492 :type version_id_marker: string
493 :param version_id_marker: The "marker" of where you are in the result
494 set with respect to version ids.
495
496 :type delimiter: string
497 :param delimiter: If this optional, Unicode string parameter
498 is included with your request, then keys that contain the
499 same string between the prefix and the first occurrence of
500 the delimiter will be rolled up into a single result
501 element in the CommonPrefixes collection. These rolled-up
502 keys are not returned elsewhere in the response.
503
504 :param encoding_type: Requests Amazon S3 to encode the response and
505 specifies the encoding method to use.
506
507 An object key can contain any Unicode character; however, an XML
508 1.0 parser cannot parse some characters, such as characters with an
509 ASCII value from 0 to 10. For characters that are not supported in
510 XML 1.0, you can add this parameter to request that Amazon S3
511 encode the keys in the response.
512
513 Valid options: ``url``
514 :type encoding_type: string
515
516 :rtype: ResultSet
517 :return: The result from S3 listing the keys requested
518 """
519 self.validate_get_all_versions_params(params)
520 return self._get_all([('Version', self.key_class),
521 ('CommonPrefixes', Prefix),
522 ('DeleteMarker', DeleteMarker)],
523 'versions', headers, **params)
524
525 def validate_get_all_versions_params(self, params):
526 """
527 Validate that the parameters passed to get_all_versions are valid.
528 Overridden by subclasses that allow a different set of parameters.
529
530 :type params: dict
531 :param params: Parameters to validate.
532 """
533 self.validate_kwarg_names(
534 params, ['maxkeys', 'max_keys', 'prefix', 'key_marker',
535 'version_id_marker', 'delimiter', 'encoding_type'])
536
537 def get_all_multipart_uploads(self, headers=None, **params):
538 """
539 A lower-level, version-aware method for listing active
540 MultiPart uploads for a bucket. This closely models the
541 actual S3 API and requires you to manually handle the paging
542 of results. For a higher-level method that handles the
543 details of paging for you, you can use the list method.
544
545 :type max_uploads: int
546 :param max_uploads: The maximum number of uploads to retrieve.
547 Default value is 1000.
548
549 :type key_marker: string
550 :param key_marker: Together with upload_id_marker, this
551 parameter specifies the multipart upload after which
552 listing should begin. If upload_id_marker is not
553 specified, only the keys lexicographically greater than
554 the specified key_marker will be included in the list.
555
556 If upload_id_marker is specified, any multipart uploads
557 for a key equal to the key_marker might also be included,
558 provided those multipart uploads have upload IDs
559 lexicographically greater than the specified
560 upload_id_marker.
561
562 :type upload_id_marker: string
563 :param upload_id_marker: Together with key-marker, specifies
564 the multipart upload after which listing should begin. If
565 key_marker is not specified, the upload_id_marker
566 parameter is ignored. Otherwise, any multipart uploads
567 for a key equal to the key_marker might be included in the
568 list only if they have an upload ID lexicographically
569 greater than the specified upload_id_marker.
570
571 :type encoding_type: string
572 :param encoding_type: Requests Amazon S3 to encode the response and
573 specifies the encoding method to use.
574
575 An object key can contain any Unicode character; however, an XML
576 1.0 parser cannot parse some characters, such as characters with an
577 ASCII value from 0 to 10. For characters that are not supported in
578 XML 1.0, you can add this parameter to request that Amazon S3
579 encode the keys in the response.
580
581 Valid options: ``url``
582
583 :type delimiter: string
584 :param delimiter: Character you use to group keys.
585 All keys that contain the same string between the prefix, if
586 specified, and the first occurrence of the delimiter after the
587 prefix are grouped under a single result element, CommonPrefixes.
588 If you don't specify the prefix parameter, then the substring
589 starts at the beginning of the key. The keys that are grouped
590 under CommonPrefixes result element are not returned elsewhere
591 in the response.
592
593 :type prefix: string
594 :param prefix: Lists in-progress uploads only for those keys that
595 begin with the specified prefix. You can use prefixes to separate
596 a bucket into different grouping of keys. (You can think of using
597 prefix to make groups in the same way you'd use a folder in a
598 file system.)
599
600 :rtype: ResultSet
601 :return: The result from S3 listing the uploads requested
602
603 """
604 self.validate_kwarg_names(params, ['max_uploads', 'key_marker',
605 'upload_id_marker', 'encoding_type',
606 'delimiter', 'prefix'])
607 return self._get_all([('Upload', MultiPartUpload),
608 ('CommonPrefixes', Prefix)],
609 'uploads', headers, **params)
610
611 def new_key(self, key_name=None):
612 """
613 Creates a new key
614
615 :type key_name: string
616 :param key_name: The name of the key to create
617
618 :rtype: :class:`boto.s3.key.Key` or subclass
619 :returns: An instance of the newly created key object
620 """
621 if not key_name:
622 raise ValueError('Empty key names are not allowed')
623 return self.key_class(self, key_name)
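# Example (illustrative; the key name is hypothetical, and no request is
# made until contents are written):
#
#     k = bucket.new_key('reports/2015-10.csv')
#     k.set_contents_from_string('a,b,c\n')     # uploads via PUT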
624
625 def generate_url(self, expires_in, method='GET', headers=None,
626 force_http=False, response_headers=None,
627 expires_in_absolute=False):
628 return self.connection.generate_url(expires_in, method, self.name,
629 headers=headers,
630 force_http=force_http,
631 response_headers=response_headers,
632 expires_in_absolute=expires_in_absolute)
633
634 def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
635 """
636 Deletes a set of keys using S3's Multi-object delete API. If a
637 VersionID is specified for that key then that version is removed.
638 Returns a MultiDeleteResult Object, which contains Deleted
639 and Error elements for each key you ask to delete.
640
641 :type keys: list
642 :param keys: A list of either key_names or (key_name, versionid) pairs
643 or a list of Key instances.
644
645 :type quiet: boolean
646 :param quiet: In quiet mode the response includes only keys
647 where the delete operation encountered an error. For a
648 successful deletion, the operation does not return any
649 information about the delete in the response body.
650
651 :type mfa_token: tuple or list of strings
652 :param mfa_token: A tuple or list consisting of the serial
653 number from the MFA device and the current value of the
654 six-digit token associated with the device. This value is
655 required anytime you are deleting versioned objects from a
656 bucket that has the MFADelete option on the bucket.
657
658 :returns: An instance of MultiDeleteResult
659 """
660 ikeys = iter(keys)
661 result = MultiDeleteResult(self)
662 provider = self.connection.provider
663 query_args = 'delete'
664
665 def delete_keys2(hdrs):
666 hdrs = hdrs or {}
667 data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
668 data += u"<Delete>"
669 if quiet:
670 data += u"<Quiet>true</Quiet>"
671 count = 0
672 while count < 1000:
673 try:
674 key = next(ikeys)
675 except StopIteration:
676 break
677 if isinstance(key, six.string_types):
678 key_name = key
679 version_id = None
680 elif isinstance(key, tuple) and len(key) == 2:
681 key_name, version_id = key
682 elif isinstance(key, (Key, DeleteMarker)) and key.name:
683 key_name = key.name
684 version_id = key.version_id
685 else:
686 if isinstance(key, Prefix):
687 key_name = key.name
688 code = 'PrefixSkipped' # Don't delete Prefix
689 else:
690 key_name = repr(key)  # try to get a string
691 code = 'InvalidArgument' # other unknown type
692 message = 'Invalid. No delete action taken for this object.'
693 error = Error(key_name, code=code, message=message)
694 result.errors.append(error)
695 continue
696 count += 1
697 data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
698 if version_id:
699 data += u"<VersionId>%s</VersionId>" % version_id
700 data += u"</Object>"
701 data += u"</Delete>"
702 if count <= 0:
703 return False # no more
704 data = data.encode('utf-8')
705 fp = BytesIO(data)
706 md5 = boto.utils.compute_md5(fp)
707 hdrs['Content-MD5'] = md5[1]
708 hdrs['Content-Type'] = 'text/xml'
709 if mfa_token:
710 hdrs[provider.mfa_header] = ' '.join(mfa_token)
711 response = self.connection.make_request('POST', self.name,
712 headers=hdrs,
713 query_args=query_args,
714 data=data)
715 body = response.read()
716 if response.status == 200:
717 h = handler.XmlHandler(result, self)
718 if not isinstance(body, bytes):
719 body = body.encode('utf-8')
720 xml.sax.parseString(body, h)
721 return count >= 1000 # more?
722 else:
723 raise provider.storage_response_error(response.status,
724 response.reason,
725 body)
726 while delete_keys2(headers):
727 pass
728 return result
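# Example (illustrative; key names and the version id are hypothetical):
#
#     result = bucket.delete_keys(['a.txt', ('b.txt', 'version-id-123')])
#     for deleted in result.deleted:
#         print('removed', deleted.key)
#     for error in result.errors:
#         print('failed', error.key, error.code)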
729
730 def delete_key(self, key_name, headers=None, version_id=None,
731 mfa_token=None):
732 """
733 Deletes a key from the bucket. If a version_id is provided,
734 only that version of the key will be deleted.
735
736 :type key_name: string
737 :param key_name: The key name to delete
738
739 :type version_id: string
740 :param version_id: The version ID (optional)
741
742 :type mfa_token: tuple or list of strings
743 :param mfa_token: A tuple or list consisting of the serial
744 number from the MFA device and the current value of the
745 six-digit token associated with the device. This value is
746 required anytime you are deleting versioned objects from a
747 bucket that has the MFADelete option on the bucket.
748
749 :rtype: :class:`boto.s3.key.Key` or subclass
750 :returns: A key object holding information on what was
751 deleted. The Caller can see if a delete_marker was
752 created or removed and what version_id the delete created
753 or removed.
754 """
755 if not key_name:
756 raise ValueError('Empty key names are not allowed')
757 return self._delete_key_internal(key_name, headers=headers,
758 version_id=version_id,
759 mfa_token=mfa_token,
760 query_args_l=None)
761
762 def _delete_key_internal(self, key_name, headers=None, version_id=None,
763 mfa_token=None, query_args_l=None):
764 query_args_l = query_args_l or []
765 provider = self.connection.provider
766 if version_id:
767 query_args_l.append('versionId=%s' % version_id)
768 query_args = '&'.join(query_args_l) or None
769 if mfa_token:
770 if not headers:
771 headers = {}
772 headers[provider.mfa_header] = ' '.join(mfa_token)
773 response = self.connection.make_request('DELETE', self.name, key_name,
774 headers=headers,
775 query_args=query_args)
776 body = response.read()
777 if response.status != 204:
778 raise provider.storage_response_error(response.status,
779 response.reason, body)
780 else:
781 # return a key object with information on what was deleted.
782 k = self.key_class(self)
783 k.name = key_name
784 k.handle_version_headers(response)
785 k.handle_addl_headers(response.getheaders())
786 return k
787
788 def copy_key(self, new_key_name, src_bucket_name,
789 src_key_name, metadata=None, src_version_id=None,
790 storage_class='STANDARD', preserve_acl=False,
791 encrypt_key=False, headers=None, query_args=None):
792 """
793 Create a new key in the bucket by copying another existing key.
794
795 :type new_key_name: string
796 :param new_key_name: The name of the new key
797
798 :type src_bucket_name: string
799 :param src_bucket_name: The name of the source bucket
800
801 :type src_key_name: string
802 :param src_key_name: The name of the source key
803
804 :type src_version_id: string
805 :param src_version_id: The version id for the key. This param
806 is optional. If not specified, the newest version of the
807 key will be copied.
808
809 :type metadata: dict
810 :param metadata: Metadata to be associated with new key. If
811 metadata is supplied, it will replace the metadata of the
812 source key being copied. If no metadata is supplied, the
813 source key's metadata will be copied to the new key.
814
815 :type storage_class: string
816 :param storage_class: The storage class of the new key. By
817 default, the new key will use the standard storage class.
818 Possible values are: STANDARD | REDUCED_REDUNDANCY
819
820 :type preserve_acl: bool
821 :param preserve_acl: If True, the ACL from the source key will
822 be copied to the destination key. If False, the
823 destination key will have the default ACL. Note that
824 preserving the ACL in the new key object will require two
825 additional API calls to S3, one to retrieve the current
826 ACL and one to set that ACL on the new object. If you
827 don't care about the ACL, a value of False will be
828 significantly more efficient.
829
830 :type encrypt_key: bool
831 :param encrypt_key: If True, the new copy of the object will
832 be encrypted on the server-side by S3 and will be stored
833 in an encrypted form while at rest in S3.
834
835 :type headers: dict
836 :param headers: A dictionary of header name/value pairs.
837
838 :type query_args: string
839 :param query_args: A string of additional querystring arguments
840 to append to the request
841
842 :rtype: :class:`boto.s3.key.Key` or subclass
843 :returns: An instance of the newly created key object
844 """
845 headers = headers or {}
846 provider = self.connection.provider
847 src_key_name = boto.utils.get_utf8_value(src_key_name)
848 if preserve_acl:
849 if self.name == src_bucket_name:
850 src_bucket = self
851 else:
852 src_bucket = self.connection.get_bucket(
853 src_bucket_name, validate=False)
854 acl = src_bucket.get_xml_acl(src_key_name)
855 if encrypt_key:
856 headers[provider.server_side_encryption_header] = 'AES256'
857 src = '%s/%s' % (src_bucket_name, urllib.parse.quote(src_key_name))
858 if src_version_id:
859 src += '?versionId=%s' % src_version_id
860 headers[provider.copy_source_header] = str(src)
861 # make sure storage_class_header key exists before accessing it
862 if provider.storage_class_header and storage_class:
863 headers[provider.storage_class_header] = storage_class
864 if metadata is not None:
865 headers[provider.metadata_directive_header] = 'REPLACE'
866 headers = boto.utils.merge_meta(headers, metadata, provider)
867 elif not query_args: # Can't use this header with multi-part copy.
868 headers[provider.metadata_directive_header] = 'COPY'
869 response = self.connection.make_request('PUT', self.name, new_key_name,
870 headers=headers,
871 query_args=query_args)
872 body = response.read()
873 if response.status == 200:
874 key = self.new_key(new_key_name)
875 h = handler.XmlHandler(key, self)
876 if not isinstance(body, bytes):
877 body = body.encode('utf-8')
878 xml.sax.parseString(body, h)
879 if hasattr(key, 'Error'):
880 raise provider.storage_copy_error(key.Code, key.Message, body)
881 key.handle_version_headers(response)
882 key.handle_addl_headers(response.getheaders())
883 if preserve_acl:
884 self.set_xml_acl(acl, new_key_name)
885 return key
886 else:
887 raise provider.storage_response_error(response.status,
888 response.reason, body)
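# Example (illustrative; bucket and key names are hypothetical):
#
#     # server-side copy within the same bucket, replacing the metadata
#     bucket.copy_key('backup/data.json', bucket.name, 'data.json',
#                     metadata={'Content-Type': 'application/json'})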
889
890 def set_canned_acl(self, acl_str, key_name='', headers=None,
891 version_id=None):
892 assert acl_str in CannedACLStrings
893
894 if headers:
895 headers[self.connection.provider.acl_header] = acl_str
896 else:
897 headers = {self.connection.provider.acl_header: acl_str}
898
899 query_args = 'acl'
900 if version_id:
901 query_args += '&versionId=%s' % version_id
902 response = self.connection.make_request('PUT', self.name, key_name,
903 headers=headers, query_args=query_args)
904 body = response.read()
905 if response.status != 200:
906 raise self.connection.provider.storage_response_error(
907 response.status, response.reason, body)
908
909 def get_xml_acl(self, key_name='', headers=None, version_id=None):
910 query_args = 'acl'
911 if version_id:
912 query_args += '&versionId=%s' % version_id
913 response = self.connection.make_request('GET', self.name, key_name,
914 query_args=query_args,
915 headers=headers)
916 body = response.read()
917 if response.status != 200:
918 raise self.connection.provider.storage_response_error(
919 response.status, response.reason, body)
920 return body
921
922 def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
923 query_args='acl'):
924 if version_id:
925 query_args += '&versionId=%s' % version_id
926 if not isinstance(acl_str, bytes):
927 acl_str = acl_str.encode('utf-8')
928 response = self.connection.make_request('PUT', self.name, key_name,
929 data=acl_str,
930 query_args=query_args,
931 headers=headers)
932 body = response.read()
933 if response.status != 200:
934 raise self.connection.provider.storage_response_error(
935 response.status, response.reason, body)
936
937 def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
938 if isinstance(acl_or_str, Policy):
939 self.set_xml_acl(acl_or_str.to_xml(), key_name,
940 headers, version_id)
941 else:
942 self.set_canned_acl(acl_or_str, key_name,
943 headers, version_id)
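# Example (illustrative): both canned ACL strings and Policy objects work.
#
#     bucket.set_acl('public-read')             # canned ACL on the bucket
#     bucket.set_acl('private', 'secret.txt')   # canned ACL on a key
#     policy = bucket.get_acl()                 # Policy object round trip
#     bucket.set_acl(policy)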
944
945 def get_acl(self, key_name='', headers=None, version_id=None):
946 query_args = 'acl'
947 if version_id:
948 query_args += '&versionId=%s' % version_id
949 response = self.connection.make_request('GET', self.name, key_name,
950 query_args=query_args,
951 headers=headers)
952 body = response.read()
953 if response.status == 200:
954 policy = Policy(self)
955 h = handler.XmlHandler(policy, self)
956 if not isinstance(body, bytes):
957 body = body.encode('utf-8')
958 xml.sax.parseString(body, h)
959 return policy
960 else:
961 raise self.connection.provider.storage_response_error(
962 response.status, response.reason, body)
963
964 def set_subresource(self, subresource, value, key_name='', headers=None,
965 version_id=None):
966 """
967 Set a subresource for a bucket or key.
968
969 :type subresource: string
970 :param subresource: The subresource to set.
971
972 :type value: string
973 :param value: The value of the subresource.
974
975 :type key_name: string
976 :param key_name: The key to operate on, or the empty string to
977 operate on the bucket itself.
978
979 :type headers: dict
980 :param headers: Additional HTTP headers to include in the request.
981
982 :type version_id: string
983 :param version_id: Optional. The version id of the key to
984 operate on. If not specified, operate on the newest
985 version.
986 """
987 if not subresource:
988 raise TypeError('set_subresource called with subresource=None')
989 query_args = subresource
990 if version_id:
991 query_args += '&versionId=%s' % version_id
992 if not isinstance(value, bytes):
993 value = value.encode('utf-8')
994 response = self.connection.make_request('PUT', self.name, key_name,
995 data=value,
996 query_args=query_args,
997 headers=headers)
998 body = response.read()
999 if response.status != 200:
1000 raise self.connection.provider.storage_response_error(
1001 response.status, response.reason, body)
1002
1003 def get_subresource(self, subresource, key_name='', headers=None,
1004 version_id=None):
1005 """
1006 Get a subresource for a bucket or key.
1007
1008 :type subresource: string
1009 :param subresource: The subresource to get.
1010
1011 :type key_name: string
1012 :param key_name: The key to operate on, or the empty string to
1013 operate on the bucket itself.
1014
1015 :type headers: dict
1016 :param headers: Additional HTTP headers to include in the request.
1017
1018 :type version_id: string
1019 :param version_id: Optional. The version id of the key to
1020 operate on. If not specified, operate on the newest
1021 version.
1022
1023 :rtype: string
1024 :returns: The value of the subresource.
1025 """
1026 if not subresource:
1027 raise TypeError('get_subresource called with subresource=None')
1028 query_args = subresource
1029 if version_id:
1030 query_args += '&versionId=%s' % version_id
1031 response = self.connection.make_request('GET', self.name, key_name,
1032 query_args=query_args,
1033 headers=headers)
1034 body = response.read()
1035 if response.status != 200:
1036 raise self.connection.provider.storage_response_error(
1037 response.status, response.reason, body)
1038 return body
1039
1040 def make_public(self, recursive=False, headers=None):
1041 self.set_canned_acl('public-read', headers=headers)
1042 if recursive:
1043 for key in self:
1044 self.set_canned_acl('public-read', key.name, headers=headers)
1045
1046 def add_email_grant(self, permission, email_address,
1047 recursive=False, headers=None):
1048 """
1049 Convenience method that provides a quick way to add an email grant
1050 to a bucket. This method retrieves the current ACL, creates a new
1051 grant based on the parameters passed in, adds that grant to the ACL
1052 and then PUT's the new ACL back to S3.
1053
1054 :type permission: string
1055 :param permission: The permission being granted. Should be one of:
1056 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
1057
1058 :type email_address: string
1059 :param email_address: The email address associated with the AWS
1060 account you are granting the permission to.
1061
1062 :type recursive: boolean
1063 :param recursive: A boolean value that controls whether the
1064 command will apply the grant to all keys within the bucket
1065 or not. The default value is False. By passing a True
1066 value, the call will iterate through all keys in the
1067 bucket and apply the same grant to each key. CAUTION: If
1068 you have a lot of keys, this could take a long time!
1069 """
1070 if permission not in S3Permissions:
1071 raise self.connection.provider.storage_permissions_error(
1072 'Unknown Permission: %s' % permission)
1073 policy = self.get_acl(headers=headers)
1074 policy.acl.add_email_grant(permission, email_address)
1075 self.set_acl(policy, headers=headers)
1076 if recursive:
1077 for key in self:
1078 key.add_email_grant(permission, email_address, headers=headers)
1079
1080 def add_user_grant(self, permission, user_id, recursive=False,
1081 headers=None, display_name=None):
1082 """
1083 Convenience method that provides a quick way to add a canonical
1084 user grant to a bucket. This method retrieves the current ACL,
1085 creates a new grant based on the parameters passed in, adds that
1086 grant to the ACL and then PUT's the new ACL back to S3.
1087
1088 :type permission: string
1089 :param permission: The permission being granted. Should be one of:
1090 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
1091
1092 :type user_id: string
1093 :param user_id: The canonical user id associated with the AWS
1094 account you are granting the permission to.
1095
1096 :type recursive: boolean
1097 :param recursive: A boolean value that controls whether the
1098 command will apply the grant to all keys within the bucket
1099 or not. The default value is False. By passing a True
1100 value, the call will iterate through all keys in the
1101 bucket and apply the same grant to each key. CAUTION: If
1102 you have a lot of keys, this could take a long time!
1103
1104 :type display_name: string
1105 :param display_name: An optional string containing the user's
1106 Display Name. Only required on Walrus.
1107 """
1108 if permission not in S3Permissions:
1109 raise self.connection.provider.storage_permissions_error(
1110 'Unknown Permission: %s' % permission)
1111 policy = self.get_acl(headers=headers)
1112 policy.acl.add_user_grant(permission, user_id,
1113 display_name=display_name)
1114 self.set_acl(policy, headers=headers)
1115 if recursive:
1116 for key in self:
1117 key.add_user_grant(permission, user_id, headers=headers,
1118 display_name=display_name)
1119
1120 def list_grants(self, headers=None):
1121 policy = self.get_acl(headers=headers)
1122 return policy.acl.grants
1123
1124 def get_location(self):
1125 """
1126 Returns the LocationConstraint for the bucket.
1127
1128 :rtype: str
1129 :return: The LocationConstraint for the bucket or the empty
1130 string if no constraint was specified when bucket was created.
1131 """
1132 response = self.connection.make_request('GET', self.name,
1133 query_args='location')
1134 body = response.read()
1135 if response.status == 200:
1136 rs = ResultSet(self)
1137 h = handler.XmlHandler(rs, self)
1138 if not isinstance(body, bytes):
1139 body = body.encode('utf-8')
1140 xml.sax.parseString(body, h)
1141 return rs.LocationConstraint
1142 else:
1143 raise self.connection.provider.storage_response_error(
1144 response.status, response.reason, body)
1145
1146 def set_xml_logging(self, logging_str, headers=None):
1147 """
1148 Set logging on a bucket directly to the given XML string.
1149
1150 :type logging_str: unicode string
1151 :param logging_str: The XML for the BucketLoggingStatus element which
1152 will be set. The string will be converted to utf-8 before
1153 it is sent. Usually, you will obtain this XML from the
1154 BucketLogging object.
1155
1156 :rtype: bool
1157 :return: True if ok or raises an exception.
1158 """
1159 body = logging_str
1160 if not isinstance(body, bytes):
1161 body = body.encode('utf-8')
1162 response = self.connection.make_request('PUT', self.name, data=body,
1163 query_args='logging', headers=headers)
1164 body = response.read()
1165 if response.status == 200:
1166 return True
1167 else:
1168 raise self.connection.provider.storage_response_error(
1169 response.status, response.reason, body)
1170
1171 def enable_logging(self, target_bucket, target_prefix='',
1172 grants=None, headers=None):
1173 """
1174 Enable logging on a bucket.
1175
1176 :type target_bucket: bucket or string
1177 :param target_bucket: The bucket to log to.
1178
1179 :type target_prefix: string
1180 :param target_prefix: The prefix which should be prepended to the
1181 generated log files written to the target_bucket.
1182
1183 :type grants: list of Grant objects
1184 :param grants: A list of extra permissions which will be granted on
1185 the log files which are created.
1186
1187 :rtype: bool
1188 :return: True if ok or raises an exception.
1189 """
1190 if isinstance(target_bucket, Bucket):
1191 target_bucket = target_bucket.name
1192 blogging = BucketLogging(target=target_bucket, prefix=target_prefix,
1193 grants=grants)
1194 return self.set_xml_logging(blogging.to_xml(), headers=headers)
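# Example (illustrative; bucket names are hypothetical and ``conn`` is an
# S3Connection as in the get_key example above):
#
#     logs = conn.get_bucket('my-log-bucket')
#     logs.set_as_logging_target()              # grant the LogDelivery group
#     bucket.enable_logging(logs, target_prefix='access-logs/')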
1195
1196 def disable_logging(self, headers=None):
1197 """
1198 Disable logging on a bucket.
1199
1200 :rtype: bool
1201 :return: True if ok or raises an exception.
1202 """
1203 blogging = BucketLogging()
1204 return self.set_xml_logging(blogging.to_xml(), headers=headers)
1205
1206 def get_logging_status(self, headers=None):
1207 """
1208 Get the logging status for this bucket.
1209
1210 :rtype: :class:`boto.s3.bucketlogging.BucketLogging`
1211 :return: A BucketLogging object for this bucket.
1212 """
1213 response = self.connection.make_request('GET', self.name,
1214 query_args='logging', headers=headers)
1215 body = response.read()
1216 if response.status == 200:
1217 blogging = BucketLogging()
1218 h = handler.XmlHandler(blogging, self)
1219 if not isinstance(body, bytes):
1220 body = body.encode('utf-8')
1221 xml.sax.parseString(body, h)
1222 return blogging
1223 else:
1224 raise self.connection.provider.storage_response_error(
1225 response.status, response.reason, body)
1226
1227 def set_as_logging_target(self, headers=None):
1228 """
1229 Set up the current bucket as a logging target by granting the necessary
1230 permissions to the LogDelivery group to write log files to this bucket.
1231 """
1232 policy = self.get_acl(headers=headers)
1233 g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
1234 g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
1235 policy.acl.add_grant(g1)
1236 policy.acl.add_grant(g2)
1237 self.set_acl(policy, headers=headers)
1238
1239 def get_request_payment(self, headers=None):
1240 response = self.connection.make_request('GET', self.name,
1241 query_args='requestPayment', headers=headers)
1242 body = response.read()
1243 if response.status == 200:
1244 return body
1245 else:
1246 raise self.connection.provider.storage_response_error(
1247 response.status, response.reason, body)
1248
1249 def set_request_payment(self, payer='BucketOwner', headers=None):
1250 body = self.BucketPaymentBody % payer
1251 response = self.connection.make_request('PUT', self.name, data=body,
1252 query_args='requestPayment', headers=headers)
1253 body = response.read()
1254 if response.status == 200:
1255 return True
1256 else:
1257 raise self.connection.provider.storage_response_error(
1258 response.status, response.reason, body)
1259
1260 def configure_versioning(self, versioning, mfa_delete=False,
1261 mfa_token=None, headers=None):
1262 """
1263 Configure versioning for this bucket.
1264
1265 .. note:: This feature is currently in beta.
1266
1267 :type versioning: bool
1268 :param versioning: A boolean indicating whether versioning is
1269 enabled (True) or suspended (False).
1270
1271 :type mfa_delete: bool
1272 :param mfa_delete: A boolean indicating whether the
1273 Multi-Factor Authentication Delete feature is enabled
1274 (True) or disabled (False). If mfa_delete is enabled then
1275 all Delete operations will require the token from your MFA
1276 device to be passed in the request.
1277
1278 :type mfa_token: tuple or list of strings
1279 :param mfa_token: A tuple or list consisting of the serial
1280 number from the MFA device and the current value of the
1281 six-digit token associated with the device. This value is
1282 required when you are changing the status of the MfaDelete
1283 property of the bucket.
1284 """
1285 if versioning:
1286 ver = 'Enabled'
1287 else:
1288 ver = 'Suspended'
1289 if mfa_delete:
1290 mfa = 'Enabled'
1291 else:
1292 mfa = 'Disabled'
1293 body = self.VersioningBody % (ver, mfa)
1294 if mfa_token:
1295 if not headers:
1296 headers = {}
1297 provider = self.connection.provider
1298 headers[provider.mfa_header] = ' '.join(mfa_token)
1299 response = self.connection.make_request('PUT', self.name, data=body,
1300 query_args='versioning', headers=headers)
1301 body = response.read()
1302 if response.status == 200:
1303 return True
1304 else:
1305 raise self.connection.provider.storage_response_error(
1306 response.status, response.reason, body)
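# Example (illustrative):
#
#     bucket.configure_versioning(True)
#     print(bucket.get_versioning_status())   # e.g. {'Versioning': 'Enabled'}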
1307
1308 def get_versioning_status(self, headers=None):
1309 """
1310 Returns the current status of versioning on the bucket.
1311
1312 :rtype: dict
1313 :returns: A dictionary containing a key named 'Versioning'
1314 that can have a value of either Enabled or Suspended.
1315 Also, if MFA Delete has ever been enabled on the
1316 bucket, the dictionary will contain a key named
1317 'MfaDelete' which will have a value of either Enabled or
1318 Disabled.
1319 """
1320 response = self.connection.make_request('GET', self.name,
1321 query_args='versioning', headers=headers)
1322 body = response.read()
1323 if not isinstance(body, six.string_types):
1324 body = body.decode('utf-8')
1325 boto.log.debug(body)
1326 if response.status == 200:
1327 d = {}
1328 ver = re.search(self.VersionRE, body)
1329 if ver:
1330 d['Versioning'] = ver.group(1)
1331 mfa = re.search(self.MFADeleteRE, body)
1332 if mfa:
1333 d['MfaDelete'] = mfa.group(1)
1334 return d
1335 else:
1336 raise self.connection.provider.storage_response_error(
1337 response.status, response.reason, body)
1338
1339 def configure_lifecycle(self, lifecycle_config, headers=None):
1340 """
1341 Configure lifecycle for this bucket.
1342
1343 :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
1344 :param lifecycle_config: The lifecycle configuration you want
1345 to configure for this bucket.
1346 """
1347 # use a distinct name so the ``xml`` module imported above isn't shadowed
1348 xml_str = lifecycle_config.to_xml()
1349 fp = StringIO(xml_str)
1350 md5 = boto.utils.compute_md5(fp)
1351 if headers is None:
1352 headers = {}
1353 headers['Content-MD5'] = md5[1]
1354 headers['Content-Type'] = 'text/xml'
1355 response = self.connection.make_request('PUT', self.name,
1356 data=fp.getvalue(),
1357 query_args='lifecycle',
1358 headers=headers)
1359 body = response.read()
1360 if response.status == 200:
1361 return True
1362 else:
1363 raise self.connection.provider.storage_response_error(
1364 response.status, response.reason, body)
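# Example (illustrative sketch; the rule id, prefix and retention period
# are hypothetical):
#
#     from boto.s3.lifecycle import Lifecycle
#     lifecycle = Lifecycle()
#     lifecycle.add_rule('expire-logs', 'logs/', 'Enabled', 30)  # 30 days
#     bucket.configure_lifecycle(lifecycle)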
1365
1366 def get_lifecycle_config(self, headers=None):
1367 """
1368 Returns the current lifecycle configuration on the bucket.
1369
1370 :rtype: :class:`boto.s3.lifecycle.Lifecycle`
1371 :returns: A LifecycleConfig object that describes all current
1372 lifecycle rules in effect for the bucket.
1373 """
1374 response = self.connection.make_request('GET', self.name,
1375 query_args='lifecycle', headers=headers)
1376 body = response.read()
1377 boto.log.debug(body)
1378 if response.status == 200:
1379 lifecycle = Lifecycle()
1380 h = handler.XmlHandler(lifecycle, self)
1381 if not isinstance(body, bytes):
1382 body = body.encode('utf-8')
1383 xml.sax.parseString(body, h)
1384 return lifecycle
1385 else:
1386 raise self.connection.provider.storage_response_error(
1387 response.status, response.reason, body)
1388
1389 def delete_lifecycle_configuration(self, headers=None):
1390 """
1391 Removes all lifecycle configuration from the bucket.
1392 """
1393 response = self.connection.make_request('DELETE', self.name,
1394 query_args='lifecycle',
1395 headers=headers)
1396 body = response.read()
1397 boto.log.debug(body)
1398 if response.status == 204:
1399 return True
1400 else:
1401 raise self.connection.provider.storage_response_error(
1402 response.status, response.reason, body)
1403
1404 def configure_website(self, suffix=None, error_key=None,
1405 redirect_all_requests_to=None,
1406 routing_rules=None,
1407 headers=None):
1408 """
1409 Configure this bucket to act as a website
1410
1411 :type suffix: str
1412 :param suffix: Suffix that is appended to a request that is for a
1413 "directory" on the website endpoint (e.g. if the suffix is
1414 index.html and you make a request to samplebucket/images/
1415 the data that is returned will be for the object with the
1416 key name images/index.html). The suffix must not be empty
1417 and must not include a slash character.
1418
1419 :type error_key: str
1420 :param error_key: The object key name to use when a 4XX class
1421 error occurs. This is optional.
1422
1423 :type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation`
1424 :param redirect_all_requests_to: Describes the redirect behavior for
1425 every request to this bucket's website endpoint. If this value is
1426 not None, no other values are considered when configuring the
1427 website configuration for the bucket. This is an instance of
1428 ``RedirectLocation``.
1429
1430 :type routing_rules: :class:`boto.s3.website.RoutingRules`
1431 :param routing_rules: Object which specifies conditions
1432 and redirects that apply when the conditions are met.
1433
1434 """
1435 config = website.WebsiteConfiguration(
1436 suffix, error_key, redirect_all_requests_to,
1437 routing_rules)
1438 return self.set_website_configuration(config, headers=headers)
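# Example (illustrative; document names are hypothetical):
#
#     bucket.configure_website(suffix='index.html', error_key='error.html')
#     print(bucket.get_website_endpoint())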
1439
1440 def set_website_configuration(self, config, headers=None):
1441 """
1442 :type config: boto.s3.website.WebsiteConfiguration
1443 :param config: Configuration data
1444 """
1445 return self.set_website_configuration_xml(config.to_xml(),
1446 headers=headers)
1447
1448
1449 def set_website_configuration_xml(self, xml, headers=None):
1450 """Upload xml website configuration"""
1451 response = self.connection.make_request('PUT', self.name, data=xml,
1452 query_args='website',
1453 headers=headers)
1454 body = response.read()
1455 if response.status == 200:
1456 return True
1457 else:
1458 raise self.connection.provider.storage_response_error(
1459 response.status, response.reason, body)
1460
1461 def get_website_configuration(self, headers=None):
1462 """
1463 Returns the current status of website configuration on the bucket.
1464
1465 :rtype: dict
1466 :returns: A dictionary containing a Python representation
1467 of the XML response from S3. The overall structure is:
1468
1469 * WebsiteConfiguration
1470
1471 * IndexDocument
1472
1473 * Suffix : suffix that is appended to request that
1474 is for a "directory" on the website endpoint
1475 * ErrorDocument
1476
1477 * Key : name of object to serve when an error occurs
1478
1479 """
1480 return self.get_website_configuration_with_xml(headers)[0]
1481
1482 def get_website_configuration_obj(self, headers=None):
1483 """Get the website configuration as a
1484 :class:`boto.s3.website.WebsiteConfiguration` object.
1485 """
1486 config_xml = self.get_website_configuration_xml(headers=headers)
1487 config = website.WebsiteConfiguration()
1488 h = handler.XmlHandler(config, self)
1489 xml.sax.parseString(config_xml, h)
1490 return config

    def get_website_configuration_with_xml(self, headers=None):
        """
        Returns the current status of website configuration on the bucket as
        unparsed XML.

        :rtype: tuple
        :returns: A 2-tuple containing:

            1) A dictionary containing a Python representation
               of the XML response. The overall structure is:

               * WebsiteConfiguration

                 * IndexDocument

                   * Suffix : suffix that is appended to a request
                     for a "directory" on the website endpoint

                 * ErrorDocument

                   * Key : name of object to serve when an error occurs

            2) Unparsed XML describing the bucket's website configuration.

        """
        body = self.get_website_configuration_xml(headers=headers)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e, body

    def get_website_configuration_xml(self, headers=None):
        """Get raw website configuration xml"""
        response = self.connection.make_request('GET', self.name,
                                                query_args='website',
                                                headers=headers)
        body = response.read().decode('utf-8')
        boto.log.debug(body)

        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
        return body

    def delete_website_configuration(self, headers=None):
        """
        Removes all website configuration from the bucket.
        """
        response = self.connection.make_request('DELETE', self.name,
                                                query_args='website',
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

    def get_website_endpoint(self):
        """
        Returns the fully qualified hostname to use if you want to access this
        bucket as a website. This doesn't validate whether the bucket has
        been correctly configured as a website or not.
        """
        parts = [self.name]
        parts.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
        parts.append('.'.join(self.connection.host.split('.')[-2:]))
        return '.'.join(parts)
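
    # Illustrative sketch (not part of the original source): the endpoint is
    # built as <bucket>.<s3-website-region>.<root domain>; for a bucket named
    # 'example-bucket' in us-west-2:
    #
    #     endpoint = bucket.get_website_endpoint()
    #     # 'example-bucket.s3-website-us-west-2.amazonaws.com'
    #     url = 'http://%s/' % endpoint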

    def get_policy(self, headers=None):
        """
        Returns the JSON policy associated with the bucket. The policy
        is returned as an uninterpreted JSON string.
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='policy',
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            return body
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

    def set_policy(self, policy, headers=None):
        """
        Add or replace the JSON policy associated with the bucket.

        :type policy: str
        :param policy: The JSON policy as a string.
        """
        response = self.connection.make_request('PUT', self.name,
                                                data=policy,
                                                query_args='policy',
                                                headers=headers)
        body = response.read()
        if 200 <= response.status <= 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
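
    # Illustrative sketch (not part of the original source): granting public
    # read access via a bucket policy. The policy document is a generic
    # example, not taken from this file.
    #
    #     import json
    #     policy = {
    #         "Version": "2012-10-17",
    #         "Statement": [{
    #             "Effect": "Allow",
    #             "Principal": "*",
    #             "Action": "s3:GetObject",
    #             "Resource": "arn:aws:s3:::example-bucket/*",
    #         }],
    #     }
    #     bucket.set_policy(json.dumps(policy))
    #     print(bucket.get_policy())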

    def delete_policy(self, headers=None):
        """
        Delete the JSON policy associated with the bucket.
        """
        response = self.connection.make_request('DELETE', self.name,
                                                data='/?policy',
                                                query_args='policy',
                                                headers=headers)
        body = response.read()
        if 200 <= response.status <= 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

    def set_cors_xml(self, cors_xml, headers=None):
        """
        Set the CORS (Cross-Origin Resource Sharing) for a bucket.

        :type cors_xml: str
        :param cors_xml: The XML document describing your desired
            CORS configuration. See the S3 documentation for details
            of the exact syntax required.
        """
        fp = StringIO(cors_xml)
        md5 = boto.utils.compute_md5(fp)
        if headers is None:
            headers = {}
        headers['Content-MD5'] = md5[1]
        headers['Content-Type'] = 'text/xml'
        response = self.connection.make_request('PUT', self.name,
                                                data=fp.getvalue(),
                                                query_args='cors',
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
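
    # Illustrative sketch (not part of the original source): a minimal CORS
    # document of the shape S3 expects, allowing cross-origin GETs from any
    # origin.
    #
    #     cors_xml = """<CORSConfiguration>
    #       <CORSRule>
    #         <AllowedOrigin>*</AllowedOrigin>
    #         <AllowedMethod>GET</AllowedMethod>
    #         <MaxAgeSeconds>3000</MaxAgeSeconds>
    #       </CORSRule>
    #     </CORSConfiguration>"""
    #     bucket.set_cors_xml(cors_xml)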

    def set_cors(self, cors_config, headers=None):
        """
        Set the CORS for this bucket given a boto CORSConfiguration
        object.

        :type cors_config: :class:`boto.s3.cors.CORSConfiguration`
        :param cors_config: The CORS configuration you want
            to configure for this bucket.
        """
        # Pass headers through; previously the argument was silently dropped.
        return self.set_cors_xml(cors_config.to_xml(), headers=headers)
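
    # Illustrative sketch (not part of the original source): the same CORS
    # rule expressed with boto's CORSConfiguration helper.
    #
    #     from boto.s3.cors import CORSConfiguration
    #     cors_cfg = CORSConfiguration()
    #     cors_cfg.add_rule('GET', '*', max_age_seconds=3000)
    #     bucket.set_cors(cors_cfg)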

    def get_cors_xml(self, headers=None):
        """
        Returns the current CORS configuration on the bucket as an
        XML document.
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='cors',
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            return body
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

    def get_cors(self, headers=None):
        """
        Returns the current CORS configuration on the bucket.

        :rtype: :class:`boto.s3.cors.CORSConfiguration`
        :returns: A CORSConfiguration object that describes all current
            CORS rules in effect for the bucket.
        """
        body = self.get_cors_xml(headers)
        cors = CORSConfiguration()
        h = handler.XmlHandler(cors, self)
        xml.sax.parseString(body, h)
        return cors
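
    # Illustrative sketch (not part of the original source): iterating over
    # the returned rules; CORSConfiguration behaves as a list of CORSRule
    # objects.
    #
    #     for rule in bucket.get_cors():
    #         print(rule.allowed_method, rule.allowed_origin)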

    def delete_cors(self, headers=None):
        """
        Removes all CORS configuration from the bucket.
        """
        response = self.connection.make_request('DELETE', self.name,
                                                query_args='cors',
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

    def initiate_multipart_upload(self, key_name, headers=None,
                                  reduced_redundancy=False,
                                  metadata=None, encrypt_key=False,
                                  policy=None):
        """
        Start a multipart upload operation.

        .. note::

            After you initiate a multipart upload and upload one or more
            parts, you must either complete or abort the upload in order
            to stop being charged for storage of the uploaded parts.
            Amazon S3 frees the storage used by the parts, and stops
            charging for it, only once the upload has been completed or
            aborted.

        :type key_name: string
        :param key_name: The name of the key that will ultimately
            result from this multipart upload operation. This will be
            exactly as the key appears in the bucket after the upload
            process has been completed.

        :type headers: dict
        :param headers: Additional HTTP headers to send and store with the
            resulting key in S3.

        :type reduced_redundancy: boolean
        :param reduced_redundancy: In multipart uploads, the storage
            class is specified when initiating the upload, not when
            uploading individual parts. So if you want the resulting
            key to use the reduced redundancy storage class, set this
            flag when you initiate the upload.

        :type metadata: dict
        :param metadata: Any metadata that you would like to set on the key
            that results from the multipart upload.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key (once completed) in S3.
        """
        query_args = 'uploads'
        provider = self.connection.provider
        headers = headers or {}
        if policy:
            headers[provider.acl_header] = policy
        if reduced_redundancy:
            storage_class_header = provider.storage_class_header
            if storage_class_header:
                headers[storage_class_header] = 'REDUCED_REDUNDANCY'
            # TODO: what if the provider doesn't support reduced redundancy?
            # (see boto.s3.key.Key.set_contents_from_file)
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        if metadata is None:
            metadata = {}

        headers = boto.utils.merge_meta(headers, metadata,
                                        self.connection.provider)
        response = self.connection.make_request('POST', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            resp = MultiPartUpload(self)
            h = handler.XmlHandler(resp, self)
            if not isinstance(body, bytes):
                body = body.encode('utf-8')
            xml.sax.parseString(body, h)
            return resp
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
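
    # Illustrative sketch (not part of the original source): a full multipart
    # round trip. Part numbers start at 1, and every part except the last
    # must be at least 5 MB; `chunks` is an assumed iterable of byte strings.
    #
    #     from boto.compat import BytesIO
    #     mp = bucket.initiate_multipart_upload('big-object')
    #     try:
    #         for i, chunk in enumerate(chunks, start=1):
    #             mp.upload_part_from_file(BytesIO(chunk), part_num=i)
    #         mp.complete_upload()
    #     except Exception:
    #         mp.cancel_upload()
    #         raise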

    def complete_multipart_upload(self, key_name, upload_id,
                                  xml_body, headers=None):
        """
        Complete a multipart upload operation.
        """
        query_args = 'uploadId=%s' % upload_id
        if headers is None:
            headers = {}
        headers['Content-Type'] = 'text/xml'
        response = self.connection.make_request('POST', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers, data=xml_body)
        contains_error = False
        body = response.read().decode('utf-8')
        # Some errors will be reported in the body of the response
        # even though the HTTP response code is 200. This check
        # does a quick and dirty peek in the body for an error element
        # (using != -1 so a match at offset zero is also caught).
        if body.find('<Error>') != -1:
            contains_error = True
        boto.log.debug(body)
        if response.status == 200 and not contains_error:
            resp = CompleteMultiPartUpload(self)
            h = handler.XmlHandler(resp, self)
            if not isinstance(body, bytes):
                body = body.encode('utf-8')
            xml.sax.parseString(body, h)
            # Use a dummy key to parse various response headers
            # for versioning and encryption info, then explicitly
            # set the completed MPU object values from the key.
            k = self.key_class(self)
            k.handle_version_headers(response)
            k.handle_encryption_headers(response)
            resp.version_id = k.version_id
            resp.encrypted = k.encrypted
            return resp
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

    def cancel_multipart_upload(self, key_name, upload_id, headers=None):
        """
        Cancel (abort) an in-progress multipart upload. To verify that all
        parts have been removed, so you don't get charged for the part
        storage, you should call the List Parts operation and ensure the
        parts list is empty.
        """
        query_args = 'uploadId=%s' % upload_id
        response = self.connection.make_request('DELETE', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status != 204:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
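
    # Illustrative sketch (not part of the original source): aborting every
    # in-progress multipart upload on the bucket, so no orphaned parts keep
    # accruing storage charges.
    #
    #     for mp in bucket.get_all_multipart_uploads():
    #         bucket.cancel_multipart_upload(mp.key_name, mp.id)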

    def delete(self, headers=None):
        """Delete this bucket; the bucket must be empty."""
        return self.connection.delete_bucket(self.name, headers=headers)

    def get_tags(self):
        """
        Returns the tags on the bucket as a
        :class:`boto.s3.tagging.Tags` object.
        """
        response = self.get_xml_tags()
        tags = Tags()
        h = handler.XmlHandler(tags, self)
        if not isinstance(response, bytes):
            response = response.encode('utf-8')
        xml.sax.parseString(response, h)
        return tags

    def get_xml_tags(self):
        """Returns the tags on the bucket as raw XML."""
        response = self.connection.make_request('GET', self.name,
                                                query_args='tagging',
                                                headers=None)
        body = response.read()
        if response.status == 200:
            return body
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

    def set_xml_tags(self, tag_str, headers=None, query_args='tagging'):
        """Set the tags on the bucket from an XML document string."""
        if headers is None:
            headers = {}
        md5 = boto.utils.compute_md5(StringIO(tag_str))
        headers['Content-MD5'] = md5[1]
        headers['Content-Type'] = 'text/xml'
        if not isinstance(tag_str, bytes):
            tag_str = tag_str.encode('utf-8')
        response = self.connection.make_request('PUT', self.name,
                                                data=tag_str,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        if response.status != 204:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
        return True

    def set_tags(self, tags, headers=None):
        """
        Set the tags on the bucket from a
        :class:`boto.s3.tagging.Tags` object.
        """
        return self.set_xml_tags(tags.to_xml(), headers=headers)
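
    # Illustrative sketch (not part of the original source): tagging a bucket
    # with boto's Tags/TagSet helpers and reading the tags back. Tags is a
    # list of TagSet objects, each of which is a list of Tag objects.
    #
    #     from boto.s3.tagging import Tags, TagSet
    #     tag_set = TagSet()
    #     tag_set.add_tag('project', 'comparison')
    #     tags = Tags()
    #     tags.add_tag_set(tag_set)
    #     bucket.set_tags(tags)
    #     for ts in bucket.get_tags():
    #         for tag in ts:
    #             print(tag.key, tag.value)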

    def delete_tags(self, headers=None):
        """
        Removes all tags from the bucket.
        """
        response = self.connection.make_request('DELETE', self.name,
                                                query_args='tagging',
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)