@@ -1,291 +1,295 @@
|
1 | 1 | # archival.py - revision archival for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | from node import hex |
|
10 | 10 | import match as matchmod |
|
11 | 11 | import cmdutil |
|
12 | 12 | import scmutil, util, encoding |
|
13 | 13 | import cStringIO, os, tarfile, time, zipfile |
|
14 | 14 | import zlib, gzip |
|
15 | 15 | |
|
16 | # from unzip source code: | |
|
17 | _UNX_IFREG = 0x8000 | |
|
18 | _UNX_IFLNK = 0xa000 | |
|
19 | ||
|
16 | 20 | def tidyprefix(dest, kind, prefix): |
|
17 | 21 | '''choose prefix to use for names in archive. make sure prefix is |
|
18 | 22 | safe for consumers.''' |
|
19 | 23 | |
|
20 | 24 | if prefix: |
|
21 | 25 | prefix = util.normpath(prefix) |
|
22 | 26 | else: |
|
23 | 27 | if not isinstance(dest, str): |
|
24 | 28 | raise ValueError('dest must be string if no prefix') |
|
25 | 29 | prefix = os.path.basename(dest) |
|
26 | 30 | lower = prefix.lower() |
|
27 | 31 | for sfx in exts.get(kind, []): |
|
28 | 32 | if lower.endswith(sfx): |
|
29 | 33 | prefix = prefix[:-len(sfx)] |
|
30 | 34 | break |
|
31 | 35 | lpfx = os.path.normpath(util.localpath(prefix)) |
|
32 | 36 | prefix = util.pconvert(lpfx) |
|
33 | 37 | if not prefix.endswith('/'): |
|
34 | 38 | prefix += '/' |
|
35 | 39 | if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix: |
|
36 | 40 | raise util.Abort(_('archive prefix contains illegal components')) |
|
37 | 41 | return prefix |
|
38 | 42 | |
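To illustrate the suffix stripping in tidyprefix() above, here is a minimal standalone sketch of the same step using plain os.path in place of Mercurial's util helpers; the paths are made up:

    import os

    exts = {'tgz': ['.tgz', '.tar.gz'], 'zip': ['.zip']}

    def derivedprefix(dest, kind):
        # mirror of the loop above: strip a known archive suffix
        prefix = os.path.basename(dest)
        lower = prefix.lower()
        for sfx in exts.get(kind, []):
            if lower.endswith(sfx):
                prefix = prefix[:-len(sfx)]
                break
        return prefix + '/'

    print derivedprefix('/tmp/hg-2.3.tar.gz', 'tgz')  # hg-2.3/
    print derivedprefix('/tmp/hg-2.3.zip', 'zip')     # hg-2.3/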
|
39 | 43 | exts = { |
|
40 | 44 | 'tar': ['.tar'], |
|
41 | 45 | 'tbz2': ['.tbz2', '.tar.bz2'], |
|
42 | 46 | 'tgz': ['.tgz', '.tar.gz'], |
|
43 | 47 | 'zip': ['.zip'], |
|
44 | 48 | } |
|
45 | 49 | |
|
46 | 50 | def guesskind(dest): |
|
47 | 51 | for kind, extensions in exts.iteritems(): |
|
48 | 52 | if util.any(dest.endswith(ext) for ext in extensions): |
|
49 | 53 | return kind |
|
50 | 54 | return None |
|
51 | 55 | |
|
52 | 56 | |
|
53 | 57 | class tarit(object): |
|
54 | 58 | '''write archive to tar file or stream. can write uncompressed, |
|
55 | 59 | or compressed with gzip or bzip2.''' 
|
56 | 60 | |
|
57 | 61 | class GzipFileWithTime(gzip.GzipFile): |
|
58 | 62 | |
|
59 | 63 | def __init__(self, *args, **kw): |
|
60 | 64 | timestamp = None |
|
61 | 65 | if 'timestamp' in kw: |
|
62 | 66 | timestamp = kw.pop('timestamp') |
|
63 | 67 | if timestamp is None: |
|
64 | 68 | self.timestamp = time.time() |
|
65 | 69 | else: |
|
66 | 70 | self.timestamp = timestamp |
|
67 | 71 | gzip.GzipFile.__init__(self, *args, **kw) |
|
68 | 72 | |
|
69 | 73 | def _write_gzip_header(self): |
|
70 | 74 | self.fileobj.write('\037\213') # magic header |
|
71 | 75 | self.fileobj.write('\010') # compression method |
|
72 | 76 | # Python 2.6 deprecates self.filename |
|
73 | 77 | fname = getattr(self, 'name', None) or self.filename |
|
74 | 78 | if fname and fname.endswith('.gz'): |
|
75 | 79 | fname = fname[:-3] |
|
76 | 80 | flags = 0 |
|
77 | 81 | if fname: |
|
78 | 82 | flags = gzip.FNAME |
|
79 | 83 | self.fileobj.write(chr(flags)) |
|
80 | 84 | gzip.write32u(self.fileobj, long(self.timestamp)) |
|
81 | 85 | self.fileobj.write('\002') |
|
82 | 86 | self.fileobj.write('\377') |
|
83 | 87 | if fname: |
|
84 | 88 | self.fileobj.write(fname + '\000') |
|
85 | 89 | |
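The point of GzipFileWithTime is reproducibility: stock gzip stamps the current time into the header, so archiving the same tree twice yields different bytes. A small sketch, assuming Python 2.7, whose gzip module grew an mtime keyword for the same purpose (older versions need a subclass like the one above):

    import gzip, cStringIO

    def gzbytes(data, mtime):
        buf = cStringIO.StringIO()
        # pin the header timestamp instead of using time.time()
        f = gzip.GzipFile('dummy.gz', 'wb', 9, buf, mtime=mtime)
        f.write(data)
        f.close()
        return buf.getvalue()

    # identical input and timestamp give byte-identical archives
    assert gzbytes('payload', 0) == gzbytes('payload', 0)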
|
86 | 90 | def __init__(self, dest, mtime, kind=''): |
|
87 | 91 | self.mtime = mtime |
|
88 | 92 | self.fileobj = None |
|
89 | 93 | |
|
90 | 94 | def taropen(name, mode, fileobj=None): |
|
91 | 95 | if kind == 'gz': |
|
92 | 96 | mode = mode[0] |
|
93 | 97 | if not fileobj: |
|
94 | 98 | fileobj = open(name, mode + 'b') |
|
95 | 99 | gzfileobj = self.GzipFileWithTime(name, mode + 'b', |
|
96 | 100 | zlib.Z_BEST_COMPRESSION, |
|
97 | 101 | fileobj, timestamp=mtime) |
|
98 | 102 | self.fileobj = gzfileobj |
|
99 | 103 | return tarfile.TarFile.taropen(name, mode, gzfileobj) |
|
100 | 104 | else: |
|
101 | 105 | self.fileobj = fileobj |
|
102 | 106 | return tarfile.open(name, mode + kind, fileobj) |
|
103 | 107 | |
|
104 | 108 | if isinstance(dest, str): |
|
105 | 109 | self.z = taropen(dest, mode='w:') |
|
106 | 110 | else: |
|
107 | 111 | # Python 2.5-2.5.1 have a regression that requires a name arg |
|
108 | 112 | self.z = taropen(name='', mode='w|', fileobj=dest) |
|
109 | 113 | |
|
110 | 114 | def addfile(self, name, mode, islink, data): |
|
111 | 115 | i = tarfile.TarInfo(name) |
|
112 | 116 | i.mtime = self.mtime |
|
113 | 117 | i.size = len(data) |
|
114 | 118 | if islink: |
|
115 | 119 | i.type = tarfile.SYMTYPE |
|
116 | 120 | i.mode = 0777 |
|
117 | 121 | i.linkname = data |
|
118 | 122 | data = None |
|
119 | 123 | i.size = 0 |
|
120 | 124 | else: |
|
121 | 125 | i.mode = mode |
|
122 | 126 | data = cStringIO.StringIO(data) |
|
123 | 127 | self.z.addfile(i, data) |
|
124 | 128 | |
|
125 | 129 | def done(self): |
|
126 | 130 | self.z.close() |
|
127 | 131 | if self.fileobj: |
|
128 | 132 | self.fileobj.close() |
|
129 | 133 | |
|
130 | 134 | class tellable(object): |
|
131 | 135 | '''provide tell method for zipfile.ZipFile when writing to http |
|
132 | 136 | response file object.''' |
|
133 | 137 | |
|
134 | 138 | def __init__(self, fp): |
|
135 | 139 | self.fp = fp |
|
136 | 140 | self.offset = 0 |
|
137 | 141 | |
|
138 | 142 | def __getattr__(self, key): |
|
139 | 143 | return getattr(self.fp, key) |
|
140 | 144 | |
|
141 | 145 | def write(self, s): |
|
142 | 146 | self.fp.write(s) |
|
143 | 147 | self.offset += len(s) |
|
144 | 148 | |
|
145 | 149 | def tell(self): |
|
146 | 150 | return self.offset |
|
147 | 151 | |
|
148 | 152 | class zipit(object): |
|
149 | 153 | '''write archive to zip file or stream. can write uncompressed, |
|
150 | 154 | or compressed with deflate.''' |
|
151 | 155 | |
|
152 | 156 | def __init__(self, dest, mtime, compress=True): |
|
153 | 157 | if not isinstance(dest, str): |
|
154 | 158 | try: |
|
155 | 159 | dest.tell() |
|
156 | 160 | except (AttributeError, IOError): |
|
157 | 161 | dest = tellable(dest) |
|
158 | 162 | self.z = zipfile.ZipFile(dest, 'w', |
|
159 | 163 | compress and zipfile.ZIP_DEFLATED or |
|
160 | 164 | zipfile.ZIP_STORED) |
|
161 | 165 | |
|
162 | 166 | # Python's zipfile module emits deprecation warnings if we try |
|
163 | 167 | # to store files with a date before 1980. |
|
164 | 168 | epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0)) |
|
165 | 169 | if mtime < epoch: |
|
166 | 170 | mtime = epoch |
|
167 | 171 | |
|
168 | 172 | self.date_time = time.gmtime(mtime)[:6] |
|
169 | 173 | |
|
170 | 174 | def addfile(self, name, mode, islink, data): |
|
171 | 175 | i = zipfile.ZipInfo(name, self.date_time) |
|
172 | 176 | i.compress_type = self.z.compression |
|
173 | 177 | # unzip will not honor unix file modes unless file creator is |
|
174 | 178 | # set to unix (id 3). |
|
175 | 179 | i.create_system = 3 |
|
176 | ftype = 0x8000 # UNX_IFREG in unzip source code |

180 | ftype = _UNX_IFREG | |
|
177 | 181 | if islink: |
|
178 | 182 | mode = 0777 |
|
179 | ftype = 0xa000 # UNX_IFLNK in unzip source code |

183 | ftype = _UNX_IFLNK | |
|
180 | 184 | i.external_attr = (mode | ftype) << 16L |
|
181 | 185 | self.z.writestr(i, data) |
|
182 | 186 | |
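A quick sketch of what the external_attr line encodes: the Unix mode and file type live in the top 16 bits of zip's external-attributes field, which unzip reads back because create_system is set to 3 above:

    _UNX_IFREG = 0x8000
    _UNX_IFLNK = 0xa000

    mode = 0755
    attr = (mode | _UNX_IFREG) << 16L
    print hex(attr)                      # 0x81ed0000L (a Python long)
    print oct(int((attr >> 16) & 0777))  # 0755, the mode unzip restores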
|
183 | 187 | def done(self): |
|
184 | 188 | self.z.close() |
|
185 | 189 | |
|
186 | 190 | class fileit(object): |
|
187 | 191 | '''write archive as files in directory.''' |
|
188 | 192 | |
|
189 | 193 | def __init__(self, name, mtime): |
|
190 | 194 | self.basedir = name |
|
191 | 195 | self.opener = scmutil.opener(self.basedir) |
|
192 | 196 | |
|
193 | 197 | def addfile(self, name, mode, islink, data): |
|
194 | 198 | if islink: |
|
195 | 199 | self.opener.symlink(data, name) |
|
196 | 200 | return |
|
197 | 201 | f = self.opener(name, "w", atomictemp=True) |
|
198 | 202 | f.write(data) |
|
199 | 203 | f.close() |
|
200 | 204 | destfile = os.path.join(self.basedir, name) |
|
201 | 205 | os.chmod(destfile, mode) |
|
202 | 206 | |
|
203 | 207 | def done(self): |
|
204 | 208 | pass |
|
205 | 209 | |
|
206 | 210 | archivers = { |
|
207 | 211 | 'files': fileit, |
|
208 | 212 | 'tar': tarit, |
|
209 | 213 | 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'), |
|
210 | 214 | 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'), |
|
211 | 215 | 'uzip': lambda name, mtime: zipit(name, mtime, False), |
|
212 | 216 | 'zip': zipit, |
|
213 | 217 | } |
|
214 | 218 | |
|
215 | 219 | def archive(repo, dest, node, kind, decode=True, matchfn=None, |
|
216 | 220 | prefix=None, mtime=None, subrepos=False): |
|
217 | 221 | '''create archive of repo as it was at node. |
|
218 | 222 | |
|
219 | 223 | dest can be name of directory, name of archive file, or file |
|
220 | 224 | object to write archive to. |
|
221 | 225 | |
|
222 | 226 | kind is type of archive to create. |
|
223 | 227 | |
|
224 | 228 | decode tells whether to put files through decode filters from |
|
225 | 229 | hgrc. |
|
226 | 230 | |
|
227 | 231 | matchfn is function to filter names of files to write to archive. |
|
228 | 232 | |
|
229 | 233 | prefix is name of path to put before every archive member.''' |
|
230 | 234 | |
|
231 | 235 | if kind == 'files': |
|
232 | 236 | if prefix: |
|
233 | 237 | raise util.Abort(_('cannot give prefix when archiving to files')) |
|
234 | 238 | else: |
|
235 | 239 | prefix = tidyprefix(dest, kind, prefix) |
|
236 | 240 | |
|
237 | 241 | def write(name, mode, islink, getdata): |
|
238 | 242 | data = getdata() |
|
239 | 243 | if decode: |
|
240 | 244 | data = repo.wwritedata(name, data) |
|
241 | 245 | archiver.addfile(prefix + name, mode, islink, data) |
|
242 | 246 | |
|
243 | 247 | if kind not in archivers: |
|
244 | 248 | raise util.Abort(_("unknown archive type '%s'") % kind) |
|
245 | 249 | |
|
246 | 250 | ctx = repo[node] |
|
247 | 251 | archiver = archivers[kind](dest, mtime or ctx.date()[0]) |
|
248 | 252 | |
|
249 | 253 | if repo.ui.configbool("ui", "archivemeta", True): |
|
250 | 254 | def metadata(): |
|
251 | 255 | base = 'repo: %s\nnode: %s\nbranch: %s\n' % ( |
|
252 | 256 | repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch())) |
|
253 | 257 | |
|
254 | 258 | tags = ''.join('tag: %s\n' % t for t in ctx.tags() |
|
255 | 259 | if repo.tagtype(t) == 'global') |
|
256 | 260 | if not tags: |
|
257 | 261 | repo.ui.pushbuffer() |
|
258 | 262 | opts = {'template': '{latesttag}\n{latesttagdistance}', |
|
259 | 263 | 'style': '', 'patch': None, 'git': None} |
|
260 | 264 | cmdutil.show_changeset(repo.ui, repo, opts).show(ctx) |
|
261 | 265 | ltags, dist = repo.ui.popbuffer().split('\n') |
|
262 | 266 | tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':')) |
|
263 | 267 | tags += 'latesttagdistance: %s\n' % dist |
|
264 | 268 | |
|
265 | 269 | return base + tags |
|
266 | 270 | |
|
267 | 271 | name = '.hg_archival.txt' |
|
268 | 272 | if not matchfn or matchfn(name): |
|
269 | 273 | write(name, 0644, False, metadata) |
|
270 | 274 | |
|
271 | 275 | if matchfn: |
|
272 | 276 | files = [f for f in ctx.manifest().keys() if matchfn(f)] |
|
273 | 277 | else: |
|
274 | 278 | files = ctx.manifest().keys() |
|
275 | 279 | files.sort() |
|
276 | 280 | total = len(files) |
|
277 | 281 | repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total) |
|
278 | 282 | for i, f in enumerate(files): |
|
279 | 283 | ff = ctx.flags(f) |
|
280 | 284 | write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data) |
|
281 | 285 | repo.ui.progress(_('archiving'), i + 1, item=f, |
|
282 | 286 | unit=_('files'), total=total) |
|
283 | 287 | repo.ui.progress(_('archiving'), None) |
|
284 | 288 | |
|
285 | 289 | if subrepos: |
|
286 | 290 | for subpath in ctx.substate: |
|
287 | 291 | sub = ctx.sub(subpath) |
|
288 | 292 | submatch = matchmod.narrowmatcher(subpath, matchfn) |
|
289 | 293 | sub.archive(repo.ui, archiver, prefix, submatch) |
|
290 | 294 | |
|
291 | 295 | archiver.done() |
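For reference, a hedged usage sketch of the archive() entry point above; the repository path and destination name are assumptions for illustration, not part of this change:

    from mercurial import hg, ui as uimod
    from mercurial import archival

    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')      # assumed repo path
    # write a gzipped tarball of tip, members prefixed with snapshot/
    archival.archive(repo, '/tmp/snapshot.tar.gz',
                     repo['tip'].node(), 'tgz', prefix='snapshot/')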
@@ -1,327 +1,329 @@
|
1 | 1 | # obsolete.py - obsolete markers handling |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org> |
|
4 | 4 | # Logilab SA <contact@logilab.fr> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | """Obsolete markers handling |
|
10 | 10 | |
|
11 | 11 | An obsolete marker maps an old changeset to a list of new |
|
12 | 12 | changesets. If the list of new changesets is empty, the old changeset |
|
13 | 13 | is said to be "killed". Otherwise, the old changeset is being |
|
14 | 14 | "replaced" by the new changesets. |
|
15 | 15 | |
|
16 | 16 | Obsolete markers can be used to record and distribute changeset graph |
|
17 | 17 | transformations performed by history rewriting operations, and help |
|
18 | 18 | building new tools to reconciliate conflicting rewriting actions. To |
|
19 | 19 | facilitate conflicts resolution, markers include various annotations |
|
20 | 20 | besides old and news changeset identifiers, such as creation date or |
|
21 | 21 | author name. |
|
22 | 22 | |
|
23 | 23 | |
|
24 | 24 | Format |
|
25 | 25 | ------ |
|
26 | 26 | |
|
27 | 27 | Markers are stored in an append-only file stored in |
|
28 | 28 | '.hg/store/obsstore'. |
|
29 | 29 | |
|
30 | 30 | The file starts with a version header: |
|
31 | 31 | |
|
32 | 32 | - 1 unsigned byte: version number, starting at zero. |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | The header is followed by the markers. Each marker is made of: |
|
36 | 36 | |
|
37 | 37 | - 1 unsigned byte: number of new changesets "R", could be zero. |
|
38 | 38 | |
|
39 | 39 | - 1 unsigned 32-bit integer: metadata size "M" in bytes.
|
40 | 40 | |
|
41 | 41 | - 1 byte: a bit field. It is reserved for flags used in obsolete |
|
42 | 42 | markers common operations, to avoid repeated decoding of metadata |
|
43 | 43 | entries. |
|
44 | 44 | |
|
45 | 45 | - 20 bytes: obsoleted changeset identifier. |
|
46 | 46 | |
|
47 | 47 | - N*20 bytes: new changeset identifiers.
|
48 | 48 | |
|
49 | 49 | - M bytes: metadata as a sequence of nul-terminated strings. Each |
|
50 | 50 | string contains a key and a value, separated by a colon ':', without
|
51 | 51 | additional encoding. Keys cannot contain '\0' or ':' and values |
|
52 | 52 | cannot contain '\0'. |
|
53 | 53 | """ |
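A worked sketch of the on-disk layout just described: one marker with a single successor and a small metadata blob, packed with the same struct format the parser below uses ('>BIB20s' followed by the successor nodes and the metadata bytes). The node values here are fake 20-byte strings:

    import struct

    prec = '\x11' * 20           # obsoleted changeset id (fake)
    succ = '\x22' * 20           # successor changeset id (fake)
    meta = 'user:alice'          # nul-separated key:value strings
    fixed = struct.pack('>BIB20s', 1, len(meta), 0, prec)
    blob = struct.pack('>B', 0) + fixed + succ + meta  # version header first
    print len(blob)              # 1 + 26 + 20 + 10 = 57 bytes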
|
54 | 54 | import struct |
|
55 | 55 | import util, base85 |
|
56 | 56 | from i18n import _ |
|
57 | 57 | |
|
58 | 58 | _pack = struct.pack |
|
59 | 59 | _unpack = struct.unpack |
|
60 | 60 | |
|
61 | _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5 | |
|
62 | ||
|
61 | 63 | # the obsolete feature is not mature enough to be enabled by default. |
|
62 | 64 | # you have to rely on a third party extension to enable this.
|
63 | 65 | _enabled = False |
|
64 | 66 | |
|
65 | 67 | # data used for parsing and writing |
|
66 | 68 | _fmversion = 0 |
|
67 | 69 | _fmfixed = '>BIB20s' |
|
68 | 70 | _fmnode = '20s' |
|
69 | 71 | _fmfsize = struct.calcsize(_fmfixed) |
|
70 | 72 | _fnodesize = struct.calcsize(_fmnode) |
|
71 | 73 | |
|
72 | 74 | def _readmarkers(data): |
|
73 | 75 | """Read and enumerate markers from raw data""" |
|
74 | 76 | off = 0 |
|
75 | 77 | diskversion = _unpack('>B', data[off:off + 1])[0] |
|
76 | 78 | off += 1 |
|
77 | 79 | if diskversion != _fmversion: |
|
78 | 80 | raise util.Abort(_('parsing obsolete marker: unknown version %r') |
|
79 | 81 | % diskversion) |
|
80 | 82 | |
|
81 | 83 | # Loop on markers |
|
82 | 84 | l = len(data) |
|
83 | 85 | while off + _fmfsize <= l: |
|
84 | 86 | # read fixed part |
|
85 | 87 | cur = data[off:off + _fmfsize] |
|
86 | 88 | off += _fmfsize |
|
87 | 89 | nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur) |
|
88 | 90 | # read replacement |
|
89 | 91 | sucs = () |
|
90 | 92 | if nbsuc: |
|
91 | 93 | s = (_fnodesize * nbsuc) |
|
92 | 94 | cur = data[off:off + s] |
|
93 | 95 | sucs = _unpack(_fmnode * nbsuc, cur) |
|
94 | 96 | off += s |
|
95 | 97 | # read metadata |
|
96 | 98 | # (metadata will be decoded on demand) |
|
97 | 99 | metadata = data[off:off + mdsize] |
|
98 | 100 | if len(metadata) != mdsize: |
|
99 | 101 | raise util.Abort(_('parsing obsolete marker: metadata is too ' |
|
100 | 102 | 'short, %d bytes expected, got %d') |
|
101 | 103 | % (mdsize, len(metadata))) |
|
102 | 104 | off += mdsize |
|
103 | 105 | yield (pre, sucs, flags, metadata) |
|
104 | 106 | |
|
105 | 107 | def encodemeta(meta): |
|
106 | 108 | """Return encoded metadata string to string mapping. |
|
107 | 109 | |
|
108 | 110 | Assume no ':' in keys and no '\0' in either keys or values."""
|
109 | 111 | for key, value in meta.iteritems(): |
|
110 | 112 | if ':' in key or '\0' in key: |
|
111 | 113 | raise ValueError("':' and '\0' are forbidden in metadata keys")
|
112 | 114 | if '\0' in value: |
|
113 | 115 | raise ValueError("'\0' is forbidden in metadata values")
|
114 | 116 | return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)]) |
|
115 | 117 | |
|
116 | 118 | def decodemeta(data): |
|
117 | 119 | """Return string to string dictionary from encoded version.""" |
|
118 | 120 | d = {} |
|
119 | 121 | for l in data.split('\0'): |
|
120 | 122 | if l: |
|
121 | 123 | key, value = l.split(':') |
|
122 | 124 | d[key] = value |
|
123 | 125 | return d |
|
124 | 126 | |
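A round-trip sketch of the metadata encoding above: nul-separated 'key:value' pairs with sorted keys for a stable byte representation. Note the sketch splits on the first colon only, since values may legitimately contain ':':

    meta = {'user': 'alice', 'date': '0 0'}
    blob = '\0'.join('%s:%s' % (k, meta[k]) for k in sorted(meta))
    # split on the first ':' only; values may contain colons
    back = dict(item.split(':', 1) for item in blob.split('\0') if item)
    assert back == meta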
|
125 | 127 | class marker(object): |
|
126 | 128 | """Wrap obsolete marker raw data""" |
|
127 | 129 | |
|
128 | 130 | def __init__(self, repo, data): |
|
129 | 131 | # the repo argument will be used to create changectx in later version |
|
130 | 132 | self._repo = repo |
|
131 | 133 | self._data = data |
|
132 | 134 | self._decodedmeta = None |
|
133 | 135 | |
|
134 | 136 | def precnode(self): |
|
135 | 137 | """Precursor changeset node identifier""" |
|
136 | 138 | return self._data[0] |
|
137 | 139 | |
|
138 | 140 | def succnodes(self): |
|
139 | 141 | """List of successor changesets node identifiers""" |
|
140 | 142 | return self._data[1] |
|
141 | 143 | |
|
142 | 144 | def metadata(self): |
|
143 | 145 | """Decoded metadata dictionary""" |
|
144 | 146 | if self._decodedmeta is None: |
|
145 | 147 | self._decodedmeta = decodemeta(self._data[3]) |
|
146 | 148 | return self._decodedmeta |
|
147 | 149 | |
|
148 | 150 | def date(self): |
|
149 | 151 | """Creation date as (unixtime, offset)""" |
|
150 | 152 | parts = self.metadata()['date'].split(' ') |
|
151 | 153 | return (float(parts[0]), int(parts[1])) |
|
152 | 154 | |
|
153 | 155 | class obsstore(object): |
|
154 | 156 | """Store obsolete markers |
|
155 | 157 | |
|
156 | 158 | Markers can be accessed with two mappings: |
|
157 | 159 | - precursors: old -> set(new) |
|
158 | 160 | - successors: new -> set(old) |
|
159 | 161 | """ |
|
160 | 162 | |
|
161 | 163 | def __init__(self, sopener): |
|
162 | 164 | self._all = [] |
|
163 | 165 | # new markers to serialize |
|
164 | 166 | self.precursors = {} |
|
165 | 167 | self.successors = {} |
|
166 | 168 | self.sopener = sopener |
|
167 | 169 | data = sopener.tryread('obsstore') |
|
168 | 170 | if data: |
|
169 | 171 | self._load(_readmarkers(data)) |
|
170 | 172 | |
|
171 | 173 | def __iter__(self): |
|
172 | 174 | return iter(self._all) |
|
173 | 175 | |
|
174 | 176 | def __nonzero__(self): |
|
175 | 177 | return bool(self._all) |
|
176 | 178 | |
|
177 | 179 | def create(self, transaction, prec, succs=(), flag=0, metadata=None): |
|
178 | 180 | """obsolete: add a new obsolete marker |
|
179 | 181 | |
|
180 | 182 | * ensure it is hashable
|
181 | 183 | * check mandatory metadata |
|
182 | 184 | * encode metadata |
|
183 | 185 | """ |
|
184 | 186 | if metadata is None: |
|
185 | 187 | metadata = {} |
|
186 | 188 | if len(prec) != 20: |
|
187 | 189 | raise ValueError(prec) |
|
188 | 190 | for succ in succs: |
|
189 | 191 | if len(succ) != 20: |
|
190 | 192 | raise ValueError(succ) |
|
191 | 193 | marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata)) |
|
192 | 194 | self.add(transaction, [marker]) |
|
193 | 195 | |
|
194 | 196 | def add(self, transaction, markers): |
|
195 | 197 | """Add new markers to the store |
|
196 | 198 | |
|
197 | 199 | Take care of filtering out duplicates.

198 | 200 | Return the number of new markers."""
|
199 | 201 | if not _enabled: |
|
200 | 202 | raise util.Abort('obsolete feature is not enabled on this repo') |
|
201 | 203 | new = [m for m in markers if m not in self._all] |
|
202 | 204 | if new: |
|
203 | 205 | f = self.sopener('obsstore', 'ab') |
|
204 | 206 | try: |
|
205 | 207 | # Whether the file's current position is at the beginning or at

206 | 208 | # the end after opening a file for appending is implementation-

207 | 209 | # defined. So we must seek to the end before calling tell(),
|
208 | 210 | # or we may get a zero offset for non-zero sized files on |
|
209 | 211 | # some platforms (issue3543). |
|
210 | f.seek(0, os.SEEK_END) |

212 | f.seek(0, _SEEK_END) | |
|
211 | 213 | offset = f.tell() |
|
212 | 214 | transaction.add('obsstore', offset) |
|
213 | 215 | # offset == 0: new file - add the version header |
|
214 | 216 | for bytes in _encodemarkers(new, offset == 0): |
|
215 | 217 | f.write(bytes) |
|
216 | 218 | finally: |
|
217 | 219 | # XXX: f.close() == filecache invalidation == obsstore rebuilt. |
|
218 | 220 | # call 'filecacheentry.refresh()' here |
|
219 | 221 | f.close() |
|
220 | 222 | self._load(new) |
|
221 | 223 | return len(new) |
|
222 | 224 | |
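A minimal reproduction sketch of the portability quirk worked around in add(): after opening a file in append mode, tell() may legally report 0 until the position is pinned with an explicit seek to the end (issue3543):

    import os, tempfile

    path = tempfile.mktemp()
    open(path, 'wb').write('abc')
    f = open(path, 'ab')
    f.seek(0, 2)             # 2 == os.SEEK_END (Python >= 2.5)
    assert f.tell() == 3     # only guaranteed after the explicit seek
    f.close()
    os.unlink(path)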
|
223 | 225 | def mergemarkers(self, transaction, data):

224 | 226 | markers = _readmarkers(data)

225 | 227 | self.add(transaction, markers)
|
226 | 228 | |
|
227 | 229 | def _load(self, markers): |
|
228 | 230 | for mark in markers: |
|
229 | 231 | self._all.append(mark) |
|
230 | 232 | pre, sucs = mark[:2] |
|
231 | 233 | self.precursors.setdefault(pre, set()).add(mark) |
|
232 | 234 | for suc in sucs: |
|
233 | 235 | self.successors.setdefault(suc, set()).add(mark) |
|
234 | 236 | |
|
235 | 237 | def _encodemarkers(markers, addheader=False): |
|
236 | 238 | # Kept separate from flushmarkers(), it will be reused for |
|
237 | 239 | # markers exchange. |
|
238 | 240 | if addheader: |
|
239 | 241 | yield _pack('>B', _fmversion) |
|
240 | 242 | for marker in markers: |
|
241 | 243 | yield _encodeonemarker(marker) |
|
242 | 244 | |
|
243 | 245 | |
|
244 | 246 | def _encodeonemarker(marker): |
|
245 | 247 | pre, sucs, flags, metadata = marker |
|
246 | 248 | nbsuc = len(sucs) |
|
247 | 249 | format = _fmfixed + (_fmnode * nbsuc) |
|
248 | 250 | data = [nbsuc, len(metadata), flags, pre] |
|
249 | 251 | data.extend(sucs) |
|
250 | 252 | return _pack(format, *data) + metadata |
|
251 | 253 | |
|
252 | 254 | # arbitrarily picked to fit into the 8K limit of the HTTP server

253 | 255 | # you have to take into account:
|
254 | 256 | # - the version header |
|
255 | 257 | # - the base85 encoding |
|
256 | 258 | _maxpayload = 5300 |
|
257 | 259 | |
|
258 | 260 | def listmarkers(repo): |
|
259 | 261 | """List markers over pushkey""" |
|
260 | 262 | if not repo.obsstore: |
|
261 | 263 | return {} |
|
262 | 264 | keys = {} |
|
263 | 265 | parts = [] |
|
264 | 266 | currentlen = _maxpayload * 2 # ensure we create a new part |
|
265 | 267 | for marker in repo.obsstore: |
|
266 | 268 | nextdata = _encodeonemarker(marker) |
|
267 | 269 | if (len(nextdata) + currentlen > _maxpayload): |
|
268 | 270 | currentpart = [] |
|
269 | 271 | currentlen = 0 |
|
270 | 272 | parts.append(currentpart) |
|
271 | 273 | currentpart.append(nextdata) |
|
272 | 274 | currentlen += len(nextdata) |
|
273 | 275 | for idx, part in enumerate(reversed(parts)): |
|
274 | 276 | data = ''.join([_pack('>B', _fmversion)] + part) |
|
275 | 277 | keys['dump%i' % idx] = base85.b85encode(data) |
|
276 | 278 | return keys |
|
277 | 279 | |
|
278 | 280 | def pushmarker(repo, key, old, new): |
|
279 | 281 | """Push markers over pushkey""" |
|
280 | 282 | if not key.startswith('dump'): |
|
281 | 283 | repo.ui.warn(_('unknown key: %r') % key) |
|
282 | 284 | return 0 |
|
283 | 285 | if old: |
|
284 | 286 | repo.ui.warn(_('unexpected old value for %r') % key)
|
285 | 287 | return 0 |
|
286 | 288 | data = base85.b85decode(new) |
|
287 | 289 | lock = repo.lock() |
|
288 | 290 | try: |
|
289 | 291 | tr = repo.transaction('pushkey: obsolete markers') |
|
290 | 292 | try: |
|
291 | 293 | repo.obsstore.mergemarkers(tr, data) |
|
292 | 294 | tr.close() |
|
293 | 295 | return 1 |
|
294 | 296 | finally: |
|
295 | 297 | tr.release() |
|
296 | 298 | finally: |
|
297 | 299 | lock.release() |
|
298 | 300 | |
|
299 | 301 | def allmarkers(repo): |
|
300 | 302 | """all obsolete markers known in a repository""" |
|
301 | 303 | for markerdata in repo.obsstore: |
|
302 | 304 | yield marker(repo, markerdata) |
|
303 | 305 | |
|
304 | 306 | def precursormarkers(ctx): |
|
305 | 307 | """obsolete marker making this changeset obsolete""" |
|
306 | 308 | for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()): |
|
307 | 309 | yield marker(ctx._repo, data) |
|
308 | 310 | |
|
309 | 311 | def successormarkers(ctx): |
|
310 | 312 | """obsolete marker marking this changeset as a successors""" |
|
311 | 313 | for data in ctx._repo.obsstore.successors.get(ctx.node(), ()): |
|
312 | 314 | yield marker(ctx._repo, data) |
|
313 | 315 | |
|
314 | 316 | def anysuccessors(obsstore, node): |
|
315 | 317 | """Yield every successor of <node> |
|
316 | 318 | |
|
317 | 319 | This is a linear yield, unsuitable for detecting split changesets."""
|
318 | 320 | remaining = set([node]) |
|
319 | 321 | seen = set(remaining) |
|
320 | 322 | while remaining: |
|
321 | 323 | current = remaining.pop() |
|
322 | 324 | yield current |
|
323 | 325 | for mark in obsstore.precursors.get(current, ()): |
|
324 | 326 | for suc in mark[1]: |
|
325 | 327 | if suc not in seen: |
|
326 | 328 | seen.add(suc) |
|
327 | 329 | remaining.add(suc) |
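A standalone sketch of the traversal in anysuccessors(), with toy one-letter nodes standing in for 20-byte identifiers; precursors maps an old node to the markers obsoleting it, and mark[1] holds the successor tuple, as in the store above:

    precursors = {'a': [('a', ('b',))], 'b': [('b', ('c', 'd'))]}

    def allsuccessors(node):
        remaining = set([node])
        seen = set(remaining)
        while remaining:
            current = remaining.pop()
            yield current
            for mark in precursors.get(current, ()):
                for suc in mark[1]:
                    if suc not in seen:
                        seen.add(suc)
                        remaining.add(suc)

    print sorted(allsuccessors('a'))   # ['a', 'b', 'c', 'd']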
@@ -1,170 +1,170 @@
|
1 | 1 | # osutil.py - pure Python version of osutil.c |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import os |
|
9 | 9 | import stat as statmod |
|
10 | 10 | |
|
11 | 11 | def _mode_to_kind(mode): |
|
12 | 12 | if statmod.S_ISREG(mode): |
|
13 | 13 | return statmod.S_IFREG |
|
14 | 14 | if statmod.S_ISDIR(mode): |
|
15 | 15 | return statmod.S_IFDIR |
|
16 | 16 | if statmod.S_ISLNK(mode): |
|
17 | 17 | return statmod.S_IFLNK |
|
18 | 18 | if statmod.S_ISBLK(mode): |
|
19 | 19 | return statmod.S_IFBLK |
|
20 | 20 | if statmod.S_ISCHR(mode): |
|
21 | 21 | return statmod.S_IFCHR |
|
22 | 22 | if statmod.S_ISFIFO(mode): |
|
23 | 23 | return statmod.S_IFIFO |
|
24 | 24 | if statmod.S_ISSOCK(mode): |
|
25 | 25 | return statmod.S_IFSOCK |
|
26 | 26 | return mode |
|
27 | 27 | |
|
28 | 28 | def listdir(path, stat=False, skip=None): |
|
29 | 29 | '''listdir(path, stat=False) -> list_of_tuples |
|
30 | 30 | |
|
31 | 31 | Return a sorted list containing information about the entries |
|
32 | 32 | in the directory. |
|
33 | 33 | |
|
34 | 34 | If stat is True, each element is a 3-tuple: |
|
35 | 35 | |
|
36 | 36 | (name, type, stat object) |
|
37 | 37 | |
|
38 | 38 | Otherwise, each element is a 2-tuple: |
|
39 | 39 | |
|
40 | 40 | (name, type) |
|
41 | 41 | ''' |
|
42 | 42 | result = [] |
|
43 | 43 | prefix = path |
|
44 | 44 | if not prefix.endswith(os.sep): |
|
45 | 45 | prefix += os.sep |
|
46 | 46 | names = os.listdir(path) |
|
47 | 47 | names.sort() |
|
48 | 48 | for fn in names: |
|
49 | 49 | st = os.lstat(prefix + fn) |
|
50 | 50 | if fn == skip and statmod.S_ISDIR(st.st_mode): |
|
51 | 51 | return [] |
|
52 | 52 | if stat: |
|
53 | 53 | result.append((fn, _mode_to_kind(st.st_mode), st)) |
|
54 | 54 | else: |
|
55 | 55 | result.append((fn, _mode_to_kind(st.st_mode))) |
|
56 | 56 | return result |
|
57 | 57 | |
|
58 | 58 | if os.name != 'nt': |
|
59 | 59 | posixfile = open |
|
60 | 60 | else: |
|
61 | 61 | import ctypes, msvcrt |
|
62 | 62 | |
|
63 | 63 | _kernel32 = ctypes.windll.kernel32 |
|
64 | 64 | |
|
65 | 65 | _DWORD = ctypes.c_ulong |
|
66 | 66 | _LPCSTR = _LPSTR = ctypes.c_char_p |
|
67 | 67 | _HANDLE = ctypes.c_void_p |
|
68 | 68 | |
|
69 | 69 | _INVALID_HANDLE_VALUE = _HANDLE(-1).value |
|
70 | 70 | |
|
71 | 71 | # CreateFile |
|
72 | 72 | _FILE_SHARE_READ = 0x00000001 |
|
73 | 73 | _FILE_SHARE_WRITE = 0x00000002 |
|
74 | 74 | _FILE_SHARE_DELETE = 0x00000004 |
|
75 | 75 | |
|
76 | 76 | _CREATE_ALWAYS = 2 |
|
77 | 77 | _OPEN_EXISTING = 3 |
|
78 | 78 | _OPEN_ALWAYS = 4 |
|
79 | 79 | |
|
80 | 80 | _GENERIC_READ = 0x80000000 |
|
81 | 81 | _GENERIC_WRITE = 0x40000000 |
|
82 | 82 | |
|
83 | 83 | _FILE_ATTRIBUTE_NORMAL = 0x80 |
|
84 | 84 | |
|
85 | # ... |

85 | # open_osfhandle flags | |
|
86 | 86 | _O_RDONLY = 0x0000 |
|
87 | 87 | _O_RDWR = 0x0002 |
|
88 | 88 | _O_APPEND = 0x0008 |
|
89 | 89 | |
|
90 | 90 | _O_TEXT = 0x4000 |
|
91 | 91 | _O_BINARY = 0x8000 |
|
92 | 92 | |
|
93 | 93 | # types of parameters of C functions used (required by pypy) |
|
94 | 94 | |
|
95 | 95 | _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p, |
|
96 | 96 | _DWORD, _DWORD, _HANDLE] |
|
97 | 97 | _kernel32.CreateFileA.restype = _HANDLE |
|
98 | 98 | |
|
99 | 99 | def _raiseioerror(name): |
|
100 | 100 | err = ctypes.WinError() |
|
101 | 101 | raise IOError(err.errno, '%s: %s' % (name, err.strerror)) |
|
102 | 102 | |
|
103 | 103 | class posixfile(object): |
|
104 | 104 | '''a file object aiming for POSIX-like semantics |
|
105 | 105 | |
|
106 | 106 | CPython's open() returns a file that was opened *without* setting the |
|
107 | 107 | _FILE_SHARE_DELETE flag, which causes rename and unlink to abort. |
|
108 | 108 | This even happens if any hardlinked copy of the file is in open state. |
|
109 | 109 | We set _FILE_SHARE_DELETE here, so files opened with posixfile can be |
|
110 | 110 | renamed and deleted while they are held open. |
|
111 | 111 | Note that if a file opened with posixfile is unlinked, the file |
|
112 | 112 | remains but cannot be opened again or be recreated under the same name, |
|
113 | 113 | until all reading processes have closed the file.''' |
|
114 | 114 | |
|
115 | 115 | def __init__(self, name, mode='r', bufsize=-1): |
|
116 | 116 | if 'b' in mode: |
|
117 | 117 | flags = _O_BINARY |
|
118 | 118 | else: |
|
119 | 119 | flags = _O_TEXT |
|
120 | 120 | |
|
121 | 121 | m0 = mode[0] |
|
122 | 122 | if m0 == 'r' and '+' not in mode: |
|
123 | 123 | flags |= _O_RDONLY |
|
124 | 124 | access = _GENERIC_READ |
|
125 | 125 | else: |
|
126 | 126 | # work around http://support.microsoft.com/kb/899149 and |
|
127 | 127 | # set _O_RDWR for 'w' and 'a', even if mode has no '+' |
|
128 | 128 | flags |= _O_RDWR |
|
129 | 129 | access = _GENERIC_READ | _GENERIC_WRITE |
|
130 | 130 | |
|
131 | 131 | if m0 == 'r': |
|
132 | 132 | creation = _OPEN_EXISTING |
|
133 | 133 | elif m0 == 'w': |
|
134 | 134 | creation = _CREATE_ALWAYS |
|
135 | 135 | elif m0 == 'a': |
|
136 | 136 | creation = _OPEN_ALWAYS |
|
137 | 137 | flags |= _O_APPEND |
|
138 | 138 | else: |
|
139 | 139 | raise ValueError("invalid mode: %s" % mode) |
|
140 | 140 | |
|
141 | 141 | fh = _kernel32.CreateFileA(name, access, |
|
142 | 142 | _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE, |
|
143 | 143 | None, creation, _FILE_ATTRIBUTE_NORMAL, None) |
|
144 | 144 | if fh == _INVALID_HANDLE_VALUE: |
|
145 | 145 | _raiseioerror(name) |
|
146 | 146 | |
|
147 | 147 | fd = msvcrt.open_osfhandle(fh, flags) |
|
148 | 148 | if fd == -1: |
|
149 | 149 | _kernel32.CloseHandle(fh) |
|
150 | 150 | _raiseioerror(name) |
|
151 | 151 | |
|
152 | 152 | f = os.fdopen(fd, mode, bufsize) |
|
153 | 153 | # unfortunately, f.name is '<fdopen>' at this point -- so we store |
|
154 | 154 | # the name on this wrapper. We cannot just assign to f.name, |
|
155 | 155 | # because that attribute is read-only. |
|
156 | 156 | object.__setattr__(self, 'name', name) |
|
157 | 157 | object.__setattr__(self, '_file', f) |
|
158 | 158 | |
|
159 | 159 | def __iter__(self): |
|
160 | 160 | return self._file |
|
161 | 161 | |
|
162 | 162 | def __getattr__(self, name): |
|
163 | 163 | return getattr(self._file, name) |
|
164 | 164 | |
|
165 | 165 | def __setattr__(self, name, value): |
|
166 | 166 | '''mimics the read-only attributes of Python file objects |
|
167 | 167 | by raising 'TypeError: readonly attribute' if someone tries: |
|
168 | 168 | f = posixfile('foo.txt') |
|
169 | 169 | f.name = 'bla' ''' |
|
170 | 170 | return self._file.__setattr__(name, value) |
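A behavior sketch of what posixfile buys on Windows: the file can be renamed while still held open, which the built-in open() refuses because it omits _FILE_SHARE_DELETE. The file names are hypothetical; util.posixfile resolves to this class on Windows and to plain open() elsewhere:

    import os
    from mercurial import util      # util.posixfile picks the right variant

    f = util.posixfile('spam.txt', 'wb')   # hypothetical file name
    f.write('data')
    os.rename('spam.txt', 'eggs.txt')      # would fail with built-in open()
    f.close()
    os.unlink('eggs.txt')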