declare local constants instead of using magic values and comments
Mads Kiilerich
r17429:72fa4ef2 default
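
All three files below apply the same pattern: a magic number that needed a trailing comment becomes a module-level constant whose name carries the explanation, so every use site reads without the comment. In miniature (constant names and values are taken from the archival.py hunk below; the standalone helper is illustrative only, not part of the change):

# before: the value needs a comment at every use site
ftype = 0x8000 # UNX_IFREG in unzip source code

# after: declare the constants once, near the top of the module ...
_UNX_IFREG = 0x8000
_UNX_IFLNK = 0xa000

def _ziptype(mode, islink):
    # ... and the use sites become self-describing
    ftype = _UNX_IFLNK if islink else _UNX_IFREG
    return (mode | ftype) << 16
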
@@ -1,291 +1,295 @@
 # archival.py - revision archival for mercurial
 #
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from i18n import _
 from node import hex
 import match as matchmod
 import cmdutil
 import scmutil, util, encoding
 import cStringIO, os, tarfile, time, zipfile
 import zlib, gzip
 
+# from unzip source code:
+_UNX_IFREG = 0x8000
+_UNX_IFLNK = 0xa000
+
 def tidyprefix(dest, kind, prefix):
     '''choose prefix to use for names in archive. make sure prefix is
     safe for consumers.'''
 
     if prefix:
         prefix = util.normpath(prefix)
     else:
         if not isinstance(dest, str):
             raise ValueError('dest must be string if no prefix')
         prefix = os.path.basename(dest)
     lower = prefix.lower()
     for sfx in exts.get(kind, []):
         if lower.endswith(sfx):
             prefix = prefix[:-len(sfx)]
             break
     lpfx = os.path.normpath(util.localpath(prefix))
     prefix = util.pconvert(lpfx)
     if not prefix.endswith('/'):
         prefix += '/'
     if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
         raise util.Abort(_('archive prefix contains illegal components'))
     return prefix
 
 exts = {
     'tar': ['.tar'],
     'tbz2': ['.tbz2', '.tar.bz2'],
     'tgz': ['.tgz', '.tar.gz'],
     'zip': ['.zip'],
     }
 
 def guesskind(dest):
     for kind, extensions in exts.iteritems():
         if util.any(dest.endswith(ext) for ext in extensions):
             return kind
     return None
 
 
 class tarit(object):
     '''write archive to tar file or stream. can write uncompressed,
     or compressed with gzip or bzip2.'''
 
     class GzipFileWithTime(gzip.GzipFile):
 
         def __init__(self, *args, **kw):
             timestamp = None
             if 'timestamp' in kw:
                 timestamp = kw.pop('timestamp')
             if timestamp is None:
                 self.timestamp = time.time()
             else:
                 self.timestamp = timestamp
             gzip.GzipFile.__init__(self, *args, **kw)
 
         def _write_gzip_header(self):
             self.fileobj.write('\037\213')  # magic header
             self.fileobj.write('\010')      # compression method
             # Python 2.6 deprecates self.filename
             fname = getattr(self, 'name', None) or self.filename
             if fname and fname.endswith('.gz'):
                 fname = fname[:-3]
             flags = 0
             if fname:
                 flags = gzip.FNAME
             self.fileobj.write(chr(flags))
             gzip.write32u(self.fileobj, long(self.timestamp))
             self.fileobj.write('\002')
             self.fileobj.write('\377')
             if fname:
                 self.fileobj.write(fname + '\000')
 
     def __init__(self, dest, mtime, kind=''):
         self.mtime = mtime
         self.fileobj = None
 
         def taropen(name, mode, fileobj=None):
             if kind == 'gz':
                 mode = mode[0]
                 if not fileobj:
                     fileobj = open(name, mode + 'b')
                 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
                                                   zlib.Z_BEST_COMPRESSION,
                                                   fileobj, timestamp=mtime)
                 self.fileobj = gzfileobj
                 return tarfile.TarFile.taropen(name, mode, gzfileobj)
             else:
                 self.fileobj = fileobj
                 return tarfile.open(name, mode + kind, fileobj)
 
         if isinstance(dest, str):
             self.z = taropen(dest, mode='w:')
         else:
             # Python 2.5-2.5.1 have a regression that requires a name arg
             self.z = taropen(name='', mode='w|', fileobj=dest)
 
     def addfile(self, name, mode, islink, data):
         i = tarfile.TarInfo(name)
         i.mtime = self.mtime
         i.size = len(data)
         if islink:
             i.type = tarfile.SYMTYPE
             i.mode = 0777
             i.linkname = data
             data = None
             i.size = 0
         else:
             i.mode = mode
             data = cStringIO.StringIO(data)
         self.z.addfile(i, data)
 
     def done(self):
         self.z.close()
         if self.fileobj:
             self.fileobj.close()
 
 class tellable(object):
     '''provide tell method for zipfile.ZipFile when writing to http
     response file object.'''
 
     def __init__(self, fp):
         self.fp = fp
         self.offset = 0
 
     def __getattr__(self, key):
         return getattr(self.fp, key)
 
     def write(self, s):
         self.fp.write(s)
         self.offset += len(s)
 
     def tell(self):
         return self.offset
 
 class zipit(object):
     '''write archive to zip file or stream. can write uncompressed,
     or compressed with deflate.'''
 
     def __init__(self, dest, mtime, compress=True):
         if not isinstance(dest, str):
             try:
                 dest.tell()
             except (AttributeError, IOError):
                 dest = tellable(dest)
         self.z = zipfile.ZipFile(dest, 'w',
                                  compress and zipfile.ZIP_DEFLATED or
                                  zipfile.ZIP_STORED)
 
         # Python's zipfile module emits deprecation warnings if we try
         # to store files with a date before 1980.
         epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
         if mtime < epoch:
             mtime = epoch
 
         self.date_time = time.gmtime(mtime)[:6]
 
     def addfile(self, name, mode, islink, data):
         i = zipfile.ZipInfo(name, self.date_time)
         i.compress_type = self.z.compression
         # unzip will not honor unix file modes unless file creator is
         # set to unix (id 3).
         i.create_system = 3
-        ftype = 0x8000 # UNX_IFREG in unzip source code
+        ftype = _UNX_IFREG
         if islink:
             mode = 0777
-            ftype = 0xa000 # UNX_IFLNK in unzip source code
+            ftype = _UNX_IFLNK
         i.external_attr = (mode | ftype) << 16L
         self.z.writestr(i, data)
 
     def done(self):
         self.z.close()
 
 class fileit(object):
     '''write archive as files in directory.'''
 
     def __init__(self, name, mtime):
         self.basedir = name
         self.opener = scmutil.opener(self.basedir)
 
     def addfile(self, name, mode, islink, data):
         if islink:
             self.opener.symlink(data, name)
             return
         f = self.opener(name, "w", atomictemp=True)
         f.write(data)
         f.close()
         destfile = os.path.join(self.basedir, name)
         os.chmod(destfile, mode)
 
     def done(self):
         pass
 
 archivers = {
     'files': fileit,
     'tar': tarit,
     'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
     'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
     'uzip': lambda name, mtime: zipit(name, mtime, False),
     'zip': zipit,
     }
 
 def archive(repo, dest, node, kind, decode=True, matchfn=None,
             prefix=None, mtime=None, subrepos=False):
     '''create archive of repo as it was at node.
 
     dest can be name of directory, name of archive file, or file
     object to write archive to.
 
     kind is type of archive to create.
 
     decode tells whether to put files through decode filters from
     hgrc.
 
     matchfn is function to filter names of files to write to archive.
 
     prefix is name of path to put before every archive member.'''
 
     if kind == 'files':
         if prefix:
             raise util.Abort(_('cannot give prefix when archiving to files'))
     else:
         prefix = tidyprefix(dest, kind, prefix)
 
     def write(name, mode, islink, getdata):
         data = getdata()
         if decode:
             data = repo.wwritedata(name, data)
         archiver.addfile(prefix + name, mode, islink, data)
 
     if kind not in archivers:
         raise util.Abort(_("unknown archive type '%s'") % kind)
 
     ctx = repo[node]
     archiver = archivers[kind](dest, mtime or ctx.date()[0])
 
     if repo.ui.configbool("ui", "archivemeta", True):
         def metadata():
             base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                 repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch()))
 
             tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                            if repo.tagtype(t) == 'global')
             if not tags:
                 repo.ui.pushbuffer()
                 opts = {'template': '{latesttag}\n{latesttagdistance}',
                         'style': '', 'patch': None, 'git': None}
                 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                 ltags, dist = repo.ui.popbuffer().split('\n')
                 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                 tags += 'latesttagdistance: %s\n' % dist
 
             return base + tags
 
         name = '.hg_archival.txt'
         if not matchfn or matchfn(name):
             write(name, 0644, False, metadata)
 
     if matchfn:
         files = [f for f in ctx.manifest().keys() if matchfn(f)]
     else:
         files = ctx.manifest().keys()
     files.sort()
     total = len(files)
     repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
     for i, f in enumerate(files):
         ff = ctx.flags(f)
         write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
         repo.ui.progress(_('archiving'), i + 1, item=f,
                          unit=_('files'), total=total)
     repo.ui.progress(_('archiving'), None)
 
     if subrepos:
         for subpath in ctx.substate:
             sub = ctx.sub(subpath)
             submatch = matchmod.narrowmatcher(subpath, matchfn)
             sub.archive(repo.ui, archiver, prefix, submatch)
 
     archiver.done()
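
For context, a usage sketch of the archive() API shown above; the repository path and output file name are hypothetical:

from mercurial import archival, hg, ui as uimod

# assumption: a local repository exists at this path
repo = hg.repository(uimod.ui(), '/path/to/repo')
# write a gzipped tarball of tip, prefixing every member with 'myproj/'
archival.archive(repo, 'myproj.tar.gz', repo['tip'].node(), 'tgz',
                 prefix='myproj/')
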
@@ -1,327 +1,329 @@
 # obsolete.py - obsolete markers handling
 #
 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
 #                Logilab SA        <contact@logilab.fr>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 """Obsolete markers handling
 
 An obsolete marker maps an old changeset to a list of new
 changesets. If the list of new changesets is empty, the old changeset
 is said to be "killed". Otherwise, the old changeset is being
 "replaced" by the new changesets.
 
 Obsolete markers can be used to record and distribute changeset graph
 transformations performed by history rewriting operations, and help
 build new tools to reconcile conflicting rewriting actions. To
 facilitate conflict resolution, markers include various annotations
 besides old and new changeset identifiers, such as creation date or
 author name.
 
 
 Format
 ------
 
 Markers are stored in an append-only file stored in
 '.hg/store/obsstore'.
 
 The file starts with a version header:
 
 - 1 unsigned byte: version number, starting at zero.
 
 
 The header is followed by the markers. Each marker is made of:
 
 - 1 unsigned byte: number of new changesets "N", could be zero.
 
 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
 
 - 1 byte: a bit field. It is reserved for flags used in common
   obsolete marker operations, to avoid repeated decoding of metadata
   entries.
 
 - 20 bytes: obsoleted changeset identifier.
 
 - N*20 bytes: new changeset identifiers.
 
 - M bytes: metadata as a sequence of nul-terminated strings. Each
   string contains a key and a value, separated by a colon ':', without
   additional encoding. Keys cannot contain '\0' or ':' and values
   cannot contain '\0'.
 """
 import struct
 import util, base85
 from i18n import _
 
 _pack = struct.pack
 _unpack = struct.unpack
 
+_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
+
 # the obsolete feature is not mature enough to be enabled by default.
 # you have to rely on a third party extension to enable this.
 _enabled = False
 
 # data used for parsing and writing
 _fmversion = 0
 _fmfixed = '>BIB20s'
 _fmnode = '20s'
 _fmfsize = struct.calcsize(_fmfixed)
 _fnodesize = struct.calcsize(_fmnode)
 
 def _readmarkers(data):
     """Read and enumerate markers from raw data"""
     off = 0
     diskversion = _unpack('>B', data[off:off + 1])[0]
     off += 1
     if diskversion != _fmversion:
         raise util.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
 
     # Loop on markers
     l = len(data)
     while off + _fmfsize <= l:
         # read fixed part
         cur = data[off:off + _fmfsize]
         off += _fmfsize
         nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
         # read replacement
         sucs = ()
         if nbsuc:
             s = (_fnodesize * nbsuc)
             cur = data[off:off + s]
             sucs = _unpack(_fmnode * nbsuc, cur)
             off += s
         # read metadata
         # (metadata will be decoded on demand)
         metadata = data[off:off + mdsize]
         if len(metadata) != mdsize:
             raise util.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
         off += mdsize
         yield (pre, sucs, flags, metadata)
 
 def encodemeta(meta):
     """Return an encoded version of a string to string metadata mapping.
 
     Assume no ':' in key and no '\0' in both key and value."""
     for key, value in meta.iteritems():
         if ':' in key or '\0' in key:
             raise ValueError("':' and '\0' are forbidden in metadata keys")
         if '\0' in value:
             raise ValueError("'\0' is forbidden in metadata values")
     return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
 
 def decodemeta(data):
     """Return string to string dictionary from encoded version."""
     d = {}
     for l in data.split('\0'):
         if l:
             key, value = l.split(':')
             d[key] = value
     return d
 
 class marker(object):
     """Wrap obsolete marker raw data"""
 
     def __init__(self, repo, data):
         # the repo argument will be used to create changectx in later version
         self._repo = repo
         self._data = data
         self._decodedmeta = None
 
     def precnode(self):
         """Precursor changeset node identifier"""
         return self._data[0]
 
     def succnodes(self):
         """List of successor changeset node identifiers"""
         return self._data[1]
 
     def metadata(self):
         """Decoded metadata dictionary"""
         if self._decodedmeta is None:
             self._decodedmeta = decodemeta(self._data[3])
         return self._decodedmeta
 
     def date(self):
         """Creation date as (unixtime, offset)"""
         parts = self.metadata()['date'].split(' ')
         return (float(parts[0]), int(parts[1]))
 
 class obsstore(object):
     """Store obsolete markers
 
     Markers can be accessed with two mappings:
     - precursors: old -> set(new)
     - successors: new -> set(old)
     """
 
     def __init__(self, sopener):
         self._all = []
         # new markers to serialize
         self.precursors = {}
         self.successors = {}
         self.sopener = sopener
         data = sopener.tryread('obsstore')
         if data:
             self._load(_readmarkers(data))
 
     def __iter__(self):
         return iter(self._all)
 
     def __nonzero__(self):
         return bool(self._all)
 
     def create(self, transaction, prec, succs=(), flag=0, metadata=None):
         """obsolete: add a new obsolete marker
 
         * ensure it is hashable
         * check mandatory metadata
         * encode metadata
         """
         if metadata is None:
             metadata = {}
         if len(prec) != 20:
             raise ValueError(prec)
         for succ in succs:
             if len(succ) != 20:
                 raise ValueError(succ)
         marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
         self.add(transaction, [marker])
 
     def add(self, transaction, markers):
         """Add new markers to the store
 
         Take care of filtering duplicates.
         Return the number of new markers."""
         if not _enabled:
             raise util.Abort('obsolete feature is not enabled on this repo')
         new = [m for m in markers if m not in self._all]
         if new:
             f = self.sopener('obsstore', 'ab')
             try:
                 # Whether the file's current position is at the beginning
                 # or at the end after opening a file for appending is
                 # implementation defined. So we must seek to the end before
                 # calling tell(), or we may get a zero offset for non-zero
                 # sized files on some platforms (issue3543).
-                f.seek(0, 2) # os.SEEK_END
+                f.seek(0, _SEEK_END)
                 offset = f.tell()
                 transaction.add('obsstore', offset)
                 # offset == 0: new file - add the version header
                 for bytes in _encodemarkers(new, offset == 0):
                     f.write(bytes)
             finally:
                 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                 # call 'filecacheentry.refresh()' here
                 f.close()
             self._load(new)
         return len(new)
 
     def mergemarkers(self, transaction, data):
         markers = _readmarkers(data)
         self.add(transaction, markers)
 
     def _load(self, markers):
         for mark in markers:
             self._all.append(mark)
             pre, sucs = mark[:2]
             self.precursors.setdefault(pre, set()).add(mark)
             for suc in sucs:
                 self.successors.setdefault(suc, set()).add(mark)
 
 def _encodemarkers(markers, addheader=False):
     # Kept separate from flushmarkers(), it will be reused for
     # markers exchange.
     if addheader:
         yield _pack('>B', _fmversion)
     for marker in markers:
         yield _encodeonemarker(marker)
 
 
 def _encodeonemarker(marker):
     pre, sucs, flags, metadata = marker
     nbsuc = len(sucs)
     format = _fmfixed + (_fmnode * nbsuc)
     data = [nbsuc, len(metadata), flags, pre]
     data.extend(sucs)
     return _pack(format, *data) + metadata
 
 # arbitrarily picked to fit into the 8K limit from the HTTP server
 # you have to take into account:
 # - the version header
 # - the base85 encoding
 _maxpayload = 5300
 
 def listmarkers(repo):
     """List markers over pushkey"""
     if not repo.obsstore:
         return {}
     keys = {}
     parts = []
     currentlen = _maxpayload * 2 # ensure we create a new part
     for marker in repo.obsstore:
         nextdata = _encodeonemarker(marker)
         if (len(nextdata) + currentlen > _maxpayload):
             currentpart = []
             currentlen = 0
             parts.append(currentpart)
         currentpart.append(nextdata)
         currentlen += len(nextdata)
     for idx, part in enumerate(reversed(parts)):
         data = ''.join([_pack('>B', _fmversion)] + part)
         keys['dump%i' % idx] = base85.b85encode(data)
     return keys
 
 def pushmarker(repo, key, old, new):
     """Push markers over pushkey"""
     if not key.startswith('dump'):
         repo.ui.warn(_('unknown key: %r') % key)
         return 0
     if old:
         repo.ui.warn(_('unexpected old value for %r') % key)
         return 0
     data = base85.b85decode(new)
     lock = repo.lock()
     try:
         tr = repo.transaction('pushkey: obsolete markers')
         try:
             repo.obsstore.mergemarkers(tr, data)
             tr.close()
             return 1
         finally:
             tr.release()
     finally:
         lock.release()
 
 def allmarkers(repo):
     """all obsolete markers known in a repository"""
     for markerdata in repo.obsstore:
         yield marker(repo, markerdata)
 
 def precursormarkers(ctx):
     """obsolete markers making this changeset obsolete"""
     for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
         yield marker(ctx._repo, data)
 
 def successormarkers(ctx):
     """obsolete markers marking this changeset as a successor"""
     for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
         yield marker(ctx._repo, data)
 
 def anysuccessors(obsstore, node):
     """Yield every successor of <node>
 
     This is a linear yield, unsuitable for detecting split changesets."""
     remaining = set([node])
     seen = set(remaining)
     while remaining:
         current = remaining.pop()
         yield current
         for mark in obsstore.precursors.get(current, ()):
             for suc in mark[1]:
                 if suc not in seen:
                     seen.add(suc)
                     remaining.add(suc)
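
The on-disk marker layout documented in the module docstring above can be exercised with the stdlib alone. A minimal sketch, packing and re-reading one marker (node values are dummies, not real changeset hashes):

import struct

_fmfixed = '>BIB20s'  # successor count, metadata size, flags, precursor node
_fmnode = '20s'       # one field per successor node

def packmarker(pre, sucs, flags, metadata):
    # fixed part, then one 20-byte field per successor, then raw metadata
    fmt = _fmfixed + _fmnode * len(sucs)
    return struct.pack(fmt, len(sucs), len(metadata), flags, pre, *sucs) + metadata

pre = '\x11' * 20              # dummy obsoleted node
suc = '\x22' * 20              # dummy successor node
meta = 'user:alice\0date:0 0'  # nul-separated key:value pairs
raw = packmarker(pre, (suc,), 0, meta)

nbsuc, mdsize, flags, gotpre = struct.unpack(_fmfixed, raw[:struct.calcsize(_fmfixed)])
assert (nbsuc, mdsize, gotpre) == (1, len(meta), pre)
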
@@ -1,170 +1,170 @@
 # osutil.py - pure Python version of osutil.c
 #
 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 import os
 import stat as statmod
 
 def _mode_to_kind(mode):
     if statmod.S_ISREG(mode):
         return statmod.S_IFREG
     if statmod.S_ISDIR(mode):
         return statmod.S_IFDIR
     if statmod.S_ISLNK(mode):
         return statmod.S_IFLNK
     if statmod.S_ISBLK(mode):
         return statmod.S_IFBLK
     if statmod.S_ISCHR(mode):
         return statmod.S_IFCHR
     if statmod.S_ISFIFO(mode):
         return statmod.S_IFIFO
     if statmod.S_ISSOCK(mode):
         return statmod.S_IFSOCK
     return mode
 
 def listdir(path, stat=False, skip=None):
     '''listdir(path, stat=False) -> list_of_tuples
 
     Return a sorted list containing information about the entries
     in the directory.
 
     If stat is True, each element is a 3-tuple:
 
       (name, type, stat object)
 
     Otherwise, each element is a 2-tuple:
 
       (name, type)
     '''
     result = []
     prefix = path
     if not prefix.endswith(os.sep):
         prefix += os.sep
     names = os.listdir(path)
     names.sort()
     for fn in names:
         st = os.lstat(prefix + fn)
         if fn == skip and statmod.S_ISDIR(st.st_mode):
             return []
         if stat:
             result.append((fn, _mode_to_kind(st.st_mode), st))
         else:
             result.append((fn, _mode_to_kind(st.st_mode)))
     return result
 
 if os.name != 'nt':
     posixfile = open
 else:
     import ctypes, msvcrt
 
     _kernel32 = ctypes.windll.kernel32
 
     _DWORD = ctypes.c_ulong
     _LPCSTR = _LPSTR = ctypes.c_char_p
     _HANDLE = ctypes.c_void_p
 
     _INVALID_HANDLE_VALUE = _HANDLE(-1).value
 
     # CreateFile
     _FILE_SHARE_READ = 0x00000001
     _FILE_SHARE_WRITE = 0x00000002
     _FILE_SHARE_DELETE = 0x00000004
 
     _CREATE_ALWAYS = 2
     _OPEN_EXISTING = 3
     _OPEN_ALWAYS = 4
 
     _GENERIC_READ = 0x80000000
     _GENERIC_WRITE = 0x40000000
 
     _FILE_ATTRIBUTE_NORMAL = 0x80
 
-    # _open_osfhandle
+    # open_osfhandle flags
     _O_RDONLY = 0x0000
     _O_RDWR = 0x0002
     _O_APPEND = 0x0008
 
     _O_TEXT = 0x4000
     _O_BINARY = 0x8000
 
     # types of parameters of C functions used (required by pypy)
 
     _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
         _DWORD, _DWORD, _HANDLE]
     _kernel32.CreateFileA.restype = _HANDLE
 
     def _raiseioerror(name):
         err = ctypes.WinError()
         raise IOError(err.errno, '%s: %s' % (name, err.strerror))
 
     class posixfile(object):
         '''a file object aiming for POSIX-like semantics
 
         CPython's open() returns a file that was opened *without* setting the
         _FILE_SHARE_DELETE flag, which causes rename and unlink to abort.
         This even happens if any hardlinked copy of the file is in open state.
         We set _FILE_SHARE_DELETE here, so files opened with posixfile can be
         renamed and deleted while they are held open.
         Note that if a file opened with posixfile is unlinked, the file
         remains but cannot be opened again or be recreated under the same name,
         until all reading processes have closed the file.'''
 
         def __init__(self, name, mode='r', bufsize=-1):
             if 'b' in mode:
                 flags = _O_BINARY
             else:
                 flags = _O_TEXT
 
             m0 = mode[0]
             if m0 == 'r' and '+' not in mode:
                 flags |= _O_RDONLY
                 access = _GENERIC_READ
             else:
                 # work around http://support.microsoft.com/kb/899149 and
                 # set _O_RDWR for 'w' and 'a', even if mode has no '+'
                 flags |= _O_RDWR
                 access = _GENERIC_READ | _GENERIC_WRITE
 
             if m0 == 'r':
                 creation = _OPEN_EXISTING
             elif m0 == 'w':
                 creation = _CREATE_ALWAYS
             elif m0 == 'a':
                 creation = _OPEN_ALWAYS
                 flags |= _O_APPEND
             else:
                 raise ValueError("invalid mode: %s" % mode)
 
             fh = _kernel32.CreateFileA(name, access,
                 _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
                 None, creation, _FILE_ATTRIBUTE_NORMAL, None)
             if fh == _INVALID_HANDLE_VALUE:
                 _raiseioerror(name)
 
             fd = msvcrt.open_osfhandle(fh, flags)
             if fd == -1:
                 _kernel32.CloseHandle(fh)
                 _raiseioerror(name)
 
             f = os.fdopen(fd, mode, bufsize)
             # unfortunately, f.name is '<fdopen>' at this point -- so we store
             # the name on this wrapper. We cannot just assign to f.name,
             # because that attribute is read-only.
             object.__setattr__(self, 'name', name)
             object.__setattr__(self, '_file', f)
 
         def __iter__(self):
             return self._file
 
         def __getattr__(self, name):
             return getattr(self._file, name)
 
         def __setattr__(self, name, value):
             '''mimics the read-only attributes of Python file objects
             by raising 'TypeError: readonly attribute' if someone tries:
               f = posixfile('foo.txt')
               f.name = 'bla' '''
             return self._file.__setattr__(name, value)
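
A usage sketch for the listdir() helper documented above; it assumes the pure-Python module is importable as osutil:

import stat as statmod
import osutil  # assumption: the module above is on sys.path

# without stat=True, each entry is (name, type); type is a stat.S_IF* constant
for name, kind in osutil.listdir('.'):
    if kind == statmod.S_IFDIR:
        print('dir:  ' + name)
    else:
        print('file: ' + name)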