changelog.py
@@ -1,378 +1,378 @@
|
1 | 1 | # changelog.py - changelog class for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import weakref |
|
9 | 9 | from node import bin, hex, nullid |
|
10 | 10 | from i18n import _ |
|
11 | 11 | import util, error, revlog, encoding |
|
12 | 12 | |
|
13 | 13 | _defaultextra = {'branch': 'default'} |
|
14 | 14 | |
|
15 | 15 | def _string_escape(text): |
|
16 | 16 | """ |
|
17 | 17 | >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)} |
|
18 | 18 | >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d |
|
19 | 19 | >>> s |
|
20 | 20 | 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n' |
|
21 | 21 | >>> res = _string_escape(s) |
|
22 | 22 | >>> s == res.decode('string_escape') |
|
23 | 23 | True |
|
24 | 24 | """ |
|
25 | 25 | # subset of the string_escape codec |
|
26 | 26 | text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r') |
|
27 | 27 | return text.replace('\0', '\\0') |
|
28 | 28 | |
|
29 | 29 | def decodeextra(text): |
|
30 | 30 | """ |
|
31 | 31 | >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'}) |
|
32 | 32 | ... ).iteritems()) |
|
33 | 33 | [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')] |
|
34 | 34 | >>> sorted(decodeextra(encodeextra({'foo': 'bar', |
|
35 | 35 | ... 'baz': chr(92) + chr(0) + '2'}) |
|
36 | 36 | ... ).iteritems()) |
|
37 | 37 | [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')] |
|
38 | 38 | """ |
|
39 | 39 | extra = _defaultextra.copy() |
|
40 | 40 | for l in text.split('\0'): |
|
41 | 41 | if l: |
|
42 | 42 | if '\\0' in l: |
|
43 | 43 | # fix up \0 without getting into trouble with \\0 |
|
44 | 44 | l = l.replace('\\\\', '\\\\\n') |
|
45 | 45 | l = l.replace('\\0', '\0') |
|
46 | 46 | l = l.replace('\n', '') |
|
47 | 47 | k, v = l.decode('string_escape').split(':', 1) |
|
48 | 48 | extra[k] = v |
|
49 | 49 | return extra |
|
50 | 50 | |
|
51 | 51 | def encodeextra(d): |
|
52 | 52 | # keys must be sorted to produce a deterministic changelog entry |
|
53 | 53 | items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)] |
|
54 | 54 | return "\0".join(items) |
|
55 | 55 | |
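The three helpers above implement the "extra" round trip: each value is string-escaped, the "key:value" pairs are joined with NUL, and decoding restores the dict with a default branch entry. Below is a self-contained sketch of the same idea with illustrative names (not the Mercurial code, and without the Python 2 string_escape codec):

    def _escape(text):
        text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
        return text.replace('\0', '\\0')

    def _unescape(text):
        mapping = {'\\': '\\', 'n': '\n', 'r': '\r', '0': '\0'}
        out, i = [], 0
        while i < len(text):
            if text[i] == '\\' and i + 1 < len(text):
                out.append(mapping.get(text[i + 1], text[i + 1]))
                i += 2
            else:
                out.append(text[i])
                i += 1
        return ''.join(out)

    def encode_extra(d):
        # keys sorted for a deterministic result, as in encodeextra()
        return '\0'.join(_escape('%s:%s' % (k, d[k])) for k in sorted(d))

    def decode_extra(text):
        extra = {'branch': 'default'}
        for item in text.split('\0'):
            if item:
                k, v = _unescape(item).split(':', 1)
                extra[k] = v
        return extra

    assert decode_extra(encode_extra({'foo': 'bar\nbaz'})) == \
        {'branch': 'default', 'foo': 'bar\nbaz'}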
|
56 | 56 | def stripdesc(desc): |
|
57 | 57 | """strip trailing whitespace and leading and trailing empty lines""" |
|
58 | 58 | return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n') |
|
59 | 59 | |
|
60 | 60 | class appender(object): |
|
61 | 61 | '''the changelog index must be updated last on disk, so we use this class |
|
62 | 62 | to delay writes to it''' |
|
63 | 63 | def __init__(self, vfs, name, mode, buf): |
|
64 | 64 | self.data = buf |
|
65 | 65 | fp = vfs(name, mode) |
|
66 | 66 | self.fp = fp |
|
67 | 67 | self.offset = fp.tell() |
|
68 | 68 | self.size = vfs.fstat(fp).st_size |
|
69 | 69 | |
|
70 | 70 | def end(self): |
|
71 | 71 | return self.size + len("".join(self.data)) |
|
72 | 72 | def tell(self): |
|
73 | 73 | return self.offset |
|
74 | 74 | def flush(self): |
|
75 | 75 | pass |
|
76 | 76 | def close(self): |
|
77 | 77 | self.fp.close() |
|
78 | 78 | |
|
79 | 79 | def seek(self, offset, whence=0): |
|
80 | 80 | '''virtual file offset spans real file and data''' |
|
81 | 81 | if whence == 0: |
|
82 | 82 | self.offset = offset |
|
83 | 83 | elif whence == 1: |
|
84 | 84 | self.offset += offset |
|
85 | 85 | elif whence == 2: |
|
86 | 86 | self.offset = self.end() + offset |
|
87 | 87 | if self.offset < self.size: |
|
88 | 88 | self.fp.seek(self.offset) |
|
89 | 89 | |
|
90 | 90 | def read(self, count=-1): |
|
91 | 91 | '''only trick here is reads that span real file and data''' |
|
92 | 92 | ret = "" |
|
93 | 93 | if self.offset < self.size: |
|
94 | 94 | s = self.fp.read(count) |
|
95 | 95 | ret = s |
|
96 | 96 | self.offset += len(s) |
|
97 | 97 | if count > 0: |
|
98 | 98 | count -= len(s) |
|
99 | 99 | if count != 0: |
|
100 | 100 | doff = self.offset - self.size |
|
101 | 101 | self.data.insert(0, "".join(self.data)) |
|
102 | 102 | del self.data[1:] |
|
103 | 103 | s = self.data[0][doff:doff + count] |
|
104 | 104 | self.offset += len(s) |
|
105 | 105 | ret += s |
|
106 | 106 | return ret |
|
107 | 107 | |
|
108 | 108 | def write(self, s): |
|
109 | 109 | self.data.append(str(s)) |
|
110 | 110 | self.offset += len(s) |
|
111 | 111 | |
|
112 | 112 | def _divertopener(opener, target): |
|
113 | 113 | """build an opener that writes in 'target.a' instead of 'target'""" |
|
114 | 114 | def _divert(name, mode='r'): |
|
115 | 115 | if name != target: |
|
116 | 116 | return opener(name, mode) |
|
117 | 117 | return opener(name + ".a", mode) |
|
118 | 118 | return _divert |
|
119 | 119 | |
|
120 | 120 | def _delayopener(opener, target, buf): |
|
121 | 121 | """build an opener that stores chunks in 'buf' instead of 'target'""" |
|
122 | 122 | def _delay(name, mode='r'): |
|
123 | 123 | if name != target: |
|
124 | 124 | return opener(name, mode) |
|
125 | 125 | return appender(opener, name, mode, buf) |
|
126 | 126 | return _delay |
|
127 | 127 | |
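appender, _divertopener and _delayopener exist so that the changelog index is the last thing made visible on disk. A minimal standalone sketch of the buffering half of that idea, using an invented DelayedFile class (not part of Mercurial):

    import os, tempfile

    class DelayedFile(object):
        def __init__(self, path):
            self.path = path
            self.buf = []                 # pending data, not yet on disk

        def write(self, data):
            self.buf.append(data)

        def finalize(self):
            with open(self.path, 'ab') as fp:
                fp.write(b''.join(self.buf))   # one append at the very end
            self.buf = []

    path = os.path.join(tempfile.mkdtemp(), 'index')
    open(path, 'wb').close()
    f = DelayedFile(path)
    f.write(b'rev1')
    assert open(path, 'rb').read() == b''      # invisible until finalized
    f.finalize()
    assert open(path, 'rb').read() == b'rev1'  # visible afterwards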
|
128 | 128 | class changelog(revlog.revlog): |
|
129 | 129 | def __init__(self, opener): |
|
130 | 130 | revlog.revlog.__init__(self, opener, "00changelog.i") |
|
131 | 131 | if self._initempty: |
|
132 | 132 | # changelogs don't benefit from generaldelta |
|
133 | 133 | self.version &= ~revlog.REVLOGGENERALDELTA |
|
134 | 134 | self._generaldelta = False |
|
135 | 135 | self._realopener = opener |
|
136 | 136 | self._delayed = False |
|
137 | 137 | self._delaybuf = None |
|
138 | 138 | self._divert = False |
|
139 | 139 | self.filteredrevs = frozenset() |
|
140 | 140 | |
|
141 | 141 | def tip(self): |
|
142 | 142 | """filtered version of revlog.tip""" |
|
143 | 143 | for i in xrange(len(self) -1, -2, -1): |
|
144 | 144 | if i not in self.filteredrevs: |
|
145 | 145 | return self.node(i) |
|
146 | 146 | |
|
147 | 147 | def __iter__(self): |
|
148 | 148 | """filtered version of revlog.__iter__""" |
|
149 | 149 | if len(self.filteredrevs) == 0: |
|
150 | 150 | return revlog.revlog.__iter__(self) |
|
151 | 151 | |
|
152 | 152 | def filterediter(): |
|
153 | 153 | for i in xrange(len(self)): |
|
154 | 154 | if i not in self.filteredrevs: |
|
155 | 155 | yield i |
|
156 | 156 | |
|
157 | 157 | return filterediter() |
|
158 | 158 | |
|
159 | 159 | def revs(self, start=0, stop=None): |
|
160 | 160 | """filtered version of revlog.revs""" |
|
161 | 161 | for i in super(changelog, self).revs(start, stop): |
|
162 | 162 | if i not in self.filteredrevs: |
|
163 | 163 | yield i |
|
164 | 164 | |
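tip(), __iter__() and revs() above, and the overrides that follow, all apply the same pattern: consult self.filteredrevs before exposing a revision. A toy stand-in showing the pattern in isolation (FilteredLog is hypothetical, not the revlog API):

    class FilteredLog(object):
        def __init__(self, entries, filteredrevs=frozenset()):
            self._entries = entries
            self.filteredrevs = filteredrevs

        def __iter__(self):
            return (i for i in range(len(self._entries))
                    if i not in self.filteredrevs)

        def tip(self):
            for i in range(len(self._entries) - 1, -1, -1):
                if i not in self.filteredrevs:
                    return i
            return -1

    log = FilteredLog(['a', 'b', 'c'], filteredrevs=frozenset([2]))
    assert list(log) == [0, 1]
    assert log.tip() == 1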
|
165 | 165 | @util.propertycache |
|
166 | 166 | def nodemap(self): |
|
167 | 167 | # XXX need filtering too |
|
168 | 168 | self.rev(self.node(0)) |
|
169 | 169 | return self._nodecache |
|
170 | 170 | |
|
171 | 171 | def hasnode(self, node): |
|
172 | 172 | """filtered version of revlog.hasnode""" |
|
173 | 173 | try: |
|
174 | 174 | i = self.rev(node) |
|
175 | 175 | return i not in self.filteredrevs |
|
176 | 176 | except KeyError: |
|
177 | 177 | return False |
|
178 | 178 | |
|
179 | 179 | def headrevs(self): |
|
180 | 180 | if self.filteredrevs: |
|
181 | 181 | try: |
|
182 | 182 | return self.index.headrevsfiltered(self.filteredrevs) |
|
183 | 183 | # AttributeError covers non-c-extension environments and |
|
184 | 184 | # old c extensions without filter handling. |
|
185 | 185 | except AttributeError: |
|
186 | 186 | return self._headrevs() |
|
187 | 187 | |
|
188 | 188 | return super(changelog, self).headrevs() |
|
189 | 189 | |
|
190 | 190 | def strip(self, *args, **kwargs): |
|
191 | 191 | # XXX make something better than assert |
|
192 | 192 | # We can't expect proper strip behavior if we are filtered. |
|
193 | 193 | assert not self.filteredrevs |
|
194 | 194 | super(changelog, self).strip(*args, **kwargs) |
|
195 | 195 | |
|
196 | 196 | def rev(self, node): |
|
197 | 197 | """filtered version of revlog.rev""" |
|
198 | 198 | r = super(changelog, self).rev(node) |
|
199 | 199 | if r in self.filteredrevs: |
|
200 | 200 | raise error.FilteredLookupError(hex(node), self.indexfile, |
|
201 | 201 | _('filtered node')) |
|
202 | 202 | return r |
|
203 | 203 | |
|
204 | 204 | def node(self, rev): |
|
205 | 205 | """filtered version of revlog.node""" |
|
206 | 206 | if rev in self.filteredrevs: |
|
207 | 207 | raise error.FilteredIndexError(rev) |
|
208 | 208 | return super(changelog, self).node(rev) |
|
209 | 209 | |
|
210 | 210 | def linkrev(self, rev): |
|
211 | 211 | """filtered version of revlog.linkrev""" |
|
212 | 212 | if rev in self.filteredrevs: |
|
213 | 213 | raise error.FilteredIndexError(rev) |
|
214 | 214 | return super(changelog, self).linkrev(rev) |
|
215 | 215 | |
|
216 | 216 | def parentrevs(self, rev): |
|
217 | 217 | """filtered version of revlog.parentrevs""" |
|
218 | 218 | if rev in self.filteredrevs: |
|
219 | 219 | raise error.FilteredIndexError(rev) |
|
220 | 220 | return super(changelog, self).parentrevs(rev) |
|
221 | 221 | |
|
222 | 222 | def flags(self, rev): |
|
223 | 223 | """filtered version of revlog.flags""" |
|
224 | 224 | if rev in self.filteredrevs: |
|
225 | 225 | raise error.FilteredIndexError(rev) |
|
226 | 226 | return super(changelog, self).flags(rev) |
|
227 | 227 | |
|
228 | 228 | def delayupdate(self, tr): |
|
229 | 229 | "delay visibility of index updates to other readers" |
|
230 | 230 | |
|
231 | 231 | if not self._delayed: |
|
232 | 232 | if len(self) == 0: |
|
233 | 233 | self._divert = True |
|
234 | 234 | if self._realopener.exists(self.indexfile + '.a'): |
|
235 | 235 | self._realopener.unlink(self.indexfile + '.a') |
|
236 | 236 | self.opener = _divertopener(self._realopener, self.indexfile) |
|
237 | 237 | else: |
|
238 | 238 | self._delaybuf = [] |
|
239 | 239 | self.opener = _delayopener(self._realopener, self.indexfile, |
|
240 | 240 | self._delaybuf) |
|
241 | 241 | self._delayed = True |
|
242 | 242 | tr.addpending('cl-%i' % id(self), self._writepending) |
|
243 | 243 | trp = weakref.proxy(tr) |
|
244 | 244 | tr.addfinalize('cl-%i' % id(self), lambda: self._finalize(trp)) |
|
245 | 245 | |
|
246 | 246 | def _finalize(self, tr): |
|
247 | 247 | "finalize index updates" |
|
248 | 248 | self._delayed = False |
|
249 | 249 | self.opener = self._realopener |
|
250 | 250 | # move redirected index data back into place |
|
251 | 251 | if self._divert: |
|
252 | 252 | assert not self._delaybuf |
|
253 | 253 | tmpname = self.indexfile + ".a" |
|
254 | 254 | nfile = self.opener.open(tmpname) |
|
255 | 255 | nfile.close() |
|
256 | 256 | self.opener.rename(tmpname, self.indexfile) |
|
257 | 257 | elif self._delaybuf: |
|
258 | 258 | fp = self.opener(self.indexfile, 'a') |
|
259 | 259 | fp.write("".join(self._delaybuf)) |
|
260 | 260 | fp.close() |
|
261 | 261 | self._delaybuf = None |
|
262 | 262 | self._divert = False |
|
263 | 263 | # split when we're done |
|
264 | 264 | self.checkinlinesize(tr) |
|
265 | 265 | |
|
266 | 266 | def readpending(self, file): |
|
267 | 267 | r = revlog.revlog(self.opener, file) |
|
268 | 268 | self.index = r.index |
|
269 | 269 | self.nodemap = r.nodemap |
|
270 | 270 | self._nodecache = r._nodecache |
|
271 | 271 | self._chunkcache = r._chunkcache |
|
272 | 272 | |
|
273 | def _writepending(self): | |
|
273 | def _writepending(self, tr): | |
|
274 | 274 | "create a file containing the unfinalized state for pretxnchangegroup" |
|
275 | 275 | if self._delaybuf: |
|
276 | 276 | # make a temporary copy of the index |
|
277 | 277 | fp1 = self._realopener(self.indexfile) |
|
278 | 278 | fp2 = self._realopener(self.indexfile + ".a", "w") |
|
279 | 279 | fp2.write(fp1.read()) |
|
280 | 280 | # add pending data |
|
281 | 281 | fp2.write("".join(self._delaybuf)) |
|
282 | 282 | fp2.close() |
|
283 | 283 | # switch modes so finalize can simply rename |
|
284 | 284 | self._delaybuf = None |
|
285 | 285 | self._divert = True |
|
286 | 286 | self.opener = _divertopener(self._realopener, self.indexfile) |
|
287 | 287 | |
|
288 | 288 | if self._divert: |
|
289 | 289 | return True |
|
290 | 290 | |
|
291 | 291 | return False |
|
292 | 292 | |
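delayupdate, _finalize and _writepending cooperate through the transaction's addpending/addfinalize hooks: pending state is written to "indexfile + '.a'" so pretxnchangegroup hooks can see it, and finalizing moves or appends it into place. A rough standalone sketch of that two-phase idea (the helper names and file layout here are invented, not hg internals):

    import os, tempfile

    def writepending(index, pending_data):
        tmp = index + '.a'
        with open(index, 'rb') as cur, open(tmp, 'wb') as out:
            out.write(cur.read())       # copy the current index
            out.write(pending_data)     # plus the not-yet-final entries
        return tmp

    def finalize(index):
        os.rename(index + '.a', index)  # pending state becomes the real index

    d = tempfile.mkdtemp()
    idx = os.path.join(d, '00changelog.i')
    with open(idx, 'wb') as fp:
        fp.write(b'existing')
    writepending(idx, b'+pending')
    finalize(idx)
    with open(idx, 'rb') as fp:
        assert fp.read() == b'existing+pending'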
|
293 | 293 | def checkinlinesize(self, tr, fp=None): |
|
294 | 294 | if not self._delayed: |
|
295 | 295 | revlog.revlog.checkinlinesize(self, tr, fp) |
|
296 | 296 | |
|
297 | 297 | def read(self, node): |
|
298 | 298 | """ |
|
299 | 299 | format used: |
|
300 | 300 | nodeid\n : manifest node in ascii |
|
301 | 301 | user\n : user, no \n or \r allowed |
|
302 | 302 | time tz extra\n : date (time is int or float, timezone is int) |
|
303 | 303 | : extra is metadata, encoded and separated by '\0' |
|
304 | 304 | : older versions ignore it |
|
305 | 305 | files\n\n : files modified by the cset, no \n or \r allowed |
|
306 | 306 | (.*) : comment (free text, ideally utf-8) |
|
307 | 307 | |
|
308 | 308 | changelog v0 doesn't use extra |
|
309 | 309 | """ |
|
310 | 310 | text = self.revision(node) |
|
311 | 311 | if not text: |
|
312 | 312 | return (nullid, "", (0, 0), [], "", _defaultextra) |
|
313 | 313 | last = text.index("\n\n") |
|
314 | 314 | desc = encoding.tolocal(text[last + 2:]) |
|
315 | 315 | l = text[:last].split('\n') |
|
316 | 316 | manifest = bin(l[0]) |
|
317 | 317 | user = encoding.tolocal(l[1]) |
|
318 | 318 | |
|
319 | 319 | tdata = l[2].split(' ', 2) |
|
320 | 320 | if len(tdata) != 3: |
|
321 | 321 | time = float(tdata[0]) |
|
322 | 322 | try: |
|
323 | 323 | # various tools did silly things with the time zone field. |
|
324 | 324 | timezone = int(tdata[1]) |
|
325 | 325 | except ValueError: |
|
326 | 326 | timezone = 0 |
|
327 | 327 | extra = _defaultextra |
|
328 | 328 | else: |
|
329 | 329 | time, timezone = float(tdata[0]), int(tdata[1]) |
|
330 | 330 | extra = decodeextra(tdata[2]) |
|
331 | 331 | |
|
332 | 332 | files = l[3:] |
|
333 | 333 | return (manifest, user, (time, timezone), files, desc, extra) |
|
334 | 334 | |
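The layout documented in read()'s docstring can be unpacked with plain string splitting. A toy parser for a minimal entry, assuming a simplified "time tz" date line and no 'extra' metadata:

    def parse_changeset(text):
        header, sep, desc = text.partition('\n\n')
        lines = header.split('\n')
        manifest, user, dateline = lines[0], lines[1], lines[2]
        files = lines[3:]
        return manifest, user, dateline, files, desc

    sample = ('deadbeef' * 5 + '\n'
              'Alice <alice@example.com>\n'
              '0 0\n'
              'foo.py\n'
              '\n'
              'fix a bug')
    node, user, date, files, desc = parse_changeset(sample)
    assert files == ['foo.py'] and desc == 'fix a bug'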
|
335 | 335 | def add(self, manifest, files, desc, transaction, p1, p2, |
|
336 | 336 | user, date=None, extra=None): |
|
337 | 337 | # Convert to UTF-8 encoded bytestrings as the very first |
|
338 | 338 | # thing: calling any method on a localstr object will turn it |
|
339 | 339 | # into a str object and the cached UTF-8 string is thus lost. |
|
340 | 340 | user, desc = encoding.fromlocal(user), encoding.fromlocal(desc) |
|
341 | 341 | |
|
342 | 342 | user = user.strip() |
|
343 | 343 | # An empty username or a username with a "\n" will make the |
|
344 | 344 | # revision text contain two "\n\n" sequences -> corrupt |
|
345 | 345 | # repository since read cannot unpack the revision. |
|
346 | 346 | if not user: |
|
347 | 347 | raise error.RevlogError(_("empty username")) |
|
348 | 348 | if "\n" in user: |
|
349 | 349 | raise error.RevlogError(_("username %s contains a newline") |
|
350 | 350 | % repr(user)) |
|
351 | 351 | |
|
352 | 352 | desc = stripdesc(desc) |
|
353 | 353 | |
|
354 | 354 | if date: |
|
355 | 355 | parseddate = "%d %d" % util.parsedate(date) |
|
356 | 356 | else: |
|
357 | 357 | parseddate = "%d %d" % util.makedate() |
|
358 | 358 | if extra: |
|
359 | 359 | branch = extra.get("branch") |
|
360 | 360 | if branch in ("default", ""): |
|
361 | 361 | del extra["branch"] |
|
362 | 362 | elif branch in (".", "null", "tip"): |
|
363 | 363 | raise error.RevlogError(_('the name \'%s\' is reserved') |
|
364 | 364 | % branch) |
|
365 | 365 | if extra: |
|
366 | 366 | extra = encodeextra(extra) |
|
367 | 367 | parseddate = "%s %s" % (parseddate, extra) |
|
368 | 368 | l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc] |
|
369 | 369 | text = "\n".join(l) |
|
370 | 370 | return self.addrevision(text, transaction, len(self), p1, p2) |
|
371 | 371 | |
|
372 | 372 | def branchinfo(self, rev): |
|
373 | 373 | """return the branch name and open/close state of a revision |
|
374 | 374 | |
|
375 | 375 | This function exists because creating a changectx object |
|
376 | 376 | just to access this is costly.""" |
|
377 | 377 | extra = self.read(rev)[5] |
|
378 | 378 | return encoding.tolocal(extra.get("branch")), 'close' in extra |
transaction.py
@@ -1,429 +1,431 @@
|
1 | 1 | # transaction.py - simple journaling scheme for mercurial |
|
2 | 2 | # |
|
3 | 3 | # This transaction scheme is intended to gracefully handle program |
|
4 | 4 | # errors and interruptions. More serious failures like system crashes |
|
5 | 5 | # can be recovered with an fsck-like tool. As the whole repository is |
|
6 | 6 | # effectively log-structured, this should amount to simply truncating |
|
7 | 7 | # anything that isn't referenced in the changelog. |
|
8 | 8 | # |
|
9 | 9 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
10 | 10 | # |
|
11 | 11 | # This software may be used and distributed according to the terms of the |
|
12 | 12 | # GNU General Public License version 2 or any later version. |
|
13 | 13 | |
|
14 | 14 | from i18n import _ |
|
15 | 15 | import errno |
|
16 | 16 | import error, util |
|
17 | 17 | |
|
18 | 18 | version = 1 |
|
19 | 19 | |
|
20 | 20 | def active(func): |
|
21 | 21 | def _active(self, *args, **kwds): |
|
22 | 22 | if self.count == 0: |
|
23 | 23 | raise error.Abort(_( |
|
24 | 24 | 'cannot use transaction when it is already committed/aborted')) |
|
25 | 25 | return func(self, *args, **kwds) |
|
26 | 26 | return _active |
|
27 | 27 | |
|
28 | 28 | def _playback(journal, report, opener, entries, backupentries, unlink=True): |
|
29 | 29 | for f, o, _ignore in entries: |
|
30 | 30 | if o or not unlink: |
|
31 | 31 | try: |
|
32 | 32 | fp = opener(f, 'a') |
|
33 | 33 | fp.truncate(o) |
|
34 | 34 | fp.close() |
|
35 | 35 | except IOError: |
|
36 | 36 | report(_("failed to truncate %s\n") % f) |
|
37 | 37 | raise |
|
38 | 38 | else: |
|
39 | 39 | try: |
|
40 | 40 | opener.unlink(f) |
|
41 | 41 | except (IOError, OSError), inst: |
|
42 | 42 | if inst.errno != errno.ENOENT: |
|
43 | 43 | raise |
|
44 | 44 | |
|
45 | 45 | backupfiles = [] |
|
46 | 46 | for f, b in backupentries: |
|
47 | 47 | if b: |
|
48 | 48 | filepath = opener.join(f) |
|
49 | 49 | backuppath = opener.join(b) |
|
50 | 50 | try: |
|
51 | 51 | util.copyfile(backuppath, filepath) |
|
52 | 52 | backupfiles.append(b) |
|
53 | 53 | except IOError: |
|
54 | 54 | report(_("failed to recover %s\n") % f) |
|
55 | 55 | raise |
|
56 | 56 | else: |
|
57 | 57 | try: |
|
58 | 58 | opener.unlink(f) |
|
59 | 59 | except (IOError, OSError), inst: |
|
60 | 60 | if inst.errno != errno.ENOENT: |
|
61 | 61 | raise |
|
62 | 62 | |
|
63 | 63 | opener.unlink(journal) |
|
64 | 64 | backuppath = "%s.backupfiles" % journal |
|
65 | 65 | if opener.exists(backuppath): |
|
66 | 66 | opener.unlink(backuppath) |
|
67 | 67 | for f in backupfiles: |
|
68 | 68 | opener.unlink(f) |
|
69 | 69 | |
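_playback undoes a partial transaction by truncating each journaled file back to its recorded offset (or unlinking files that were newly created) and then restoring backups. A standalone sketch of the truncation part, with invented record/playback helpers:

    import os, tempfile

    def record(journal, path):
        size = os.path.getsize(path) if os.path.exists(path) else 0
        journal.append((path, size))

    def playback(journal):
        for path, offset in journal:
            if offset == 0:
                if os.path.exists(path):
                    os.unlink(path)          # nothing recorded before: drop it
            elif os.path.exists(path):
                with open(path, 'ab') as fp:
                    fp.truncate(offset)      # discard everything appended later

    d = tempfile.mkdtemp()
    p = os.path.join(d, 'data')
    with open(p, 'wb') as fp:
        fp.write(b'old')
    journal = []
    record(journal, p)                       # remembers size 3
    with open(p, 'ab') as fp:
        fp.write(b' garbage from an aborted write')
    playback(journal)
    with open(p, 'rb') as fp:
        assert fp.read() == b'old'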
|
70 | 70 | class transaction(object): |
|
71 | 71 | def __init__(self, report, opener, journal, after=None, createmode=None, |
|
72 | 72 | onclose=None, onabort=None): |
|
73 | 73 | """Begin a new transaction |
|
74 | 74 | |
|
75 | 75 | Begins a new transaction that allows rolling back writes in the event of |
|
76 | 76 | an exception. |
|
77 | 77 | |
|
78 | 78 | * `after`: called after the transaction has been committed |
|
79 | 79 | * `createmode`: the mode of the journal file that will be created |
|
80 | 80 | * `onclose`: called as the transaction is closing, but before it is |
|
81 | 81 | closed |
|
82 | 82 | * `onabort`: called as the transaction is aborting, but before any files |
|
83 | 83 | have been truncated |
|
84 | 84 | """ |
|
85 | 85 | self.count = 1 |
|
86 | 86 | self.usages = 1 |
|
87 | 87 | self.report = report |
|
88 | 88 | self.opener = opener |
|
89 | 89 | self.after = after |
|
90 | 90 | self.onclose = onclose |
|
91 | 91 | self.onabort = onabort |
|
92 | 92 | self.entries = [] |
|
93 | 93 | self.map = {} |
|
94 | 94 | self.journal = journal |
|
95 | 95 | self._queue = [] |
|
96 | 96 | # a dict of arguments to be passed to hooks |
|
97 | 97 | self.hookargs = {} |
|
98 | 98 | self.file = opener.open(self.journal, "w") |
|
99 | 99 | |
|
100 | 100 | # a list of ('path', 'backuppath') entries. |
|
101 | 101 | # if 'backuppath' is empty, no file existed at backup time |
|
102 | 102 | self._backupentries = [] |
|
103 | 103 | self._backupmap = {} |
|
104 | 104 | self._backupjournal = "%s.backupfiles" % journal |
|
105 | 105 | self._backupsfile = opener.open(self._backupjournal, 'w') |
|
106 | 106 | self._backupsfile.write('%d\n' % version) |
|
107 | 107 | |
|
108 | 108 | if createmode is not None: |
|
109 | 109 | opener.chmod(self.journal, createmode & 0666) |
|
110 | 110 | opener.chmod(self._backupjournal, createmode & 0666) |
|
111 | 111 | |
|
112 | 112 | # hold file generations to be performed on commit |
|
113 | 113 | self._filegenerators = {} |
|
114 | 114 | # hold callbalk to write pending data for hooks |
|
115 | 115 | self._pendingcallback = {} |
|
116 | 116 | # True is any pending data have been written ever |
|
117 | 117 | self._anypending = False |
|
118 | 118 | # holds callback to call when writing the transaction |
|
119 | 119 | self._finalizecallback = {} |
|
120 | 120 | # hold callbalk for post transaction close |
|
121 | 121 | self._postclosecallback = {} |
|
122 | 122 | |
|
123 | 123 | def __del__(self): |
|
124 | 124 | if self.journal: |
|
125 | 125 | self._abort() |
|
126 | 126 | |
|
127 | 127 | @active |
|
128 | 128 | def startgroup(self): |
|
129 | 129 | """delay registration of file entry |
|
130 | 130 | |
|
131 | 131 | This is used by strip to delay vision of strip offset. The transaction |
|
132 | 132 | sees either none or all of the strip actions to be done.""" |
|
133 | 133 | self._queue.append([]) |
|
134 | 134 | |
|
135 | 135 | @active |
|
136 | 136 | def endgroup(self): |
|
137 | 137 | """apply delayed registration of file entry. |
|
138 | 138 | |
|
139 | 139 | This is used by strip to delay vision of strip offset. The transaction |
|
140 | 140 | sees either none or all of the strip actions to be done.""" |
|
141 | 141 | q = self._queue.pop() |
|
142 | 142 | for f, o, data in q: |
|
143 | 143 | self._addentry(f, o, data) |
|
144 | 144 | |
|
145 | 145 | @active |
|
146 | 146 | def add(self, file, offset, data=None): |
|
147 | 147 | """record the state of an append-only file before update""" |
|
148 | 148 | if file in self.map or file in self._backupmap: |
|
149 | 149 | return |
|
150 | 150 | if self._queue: |
|
151 | 151 | self._queue[-1].append((file, offset, data)) |
|
152 | 152 | return |
|
153 | 153 | |
|
154 | 154 | self._addentry(file, offset, data) |
|
155 | 155 | |
|
156 | 156 | def _addentry(self, file, offset, data): |
|
157 | 157 | """add an append-only entry to memory and on-disk state""" |
|
158 | 158 | if file in self.map or file in self._backupmap: |
|
159 | 159 | return |
|
160 | 160 | self.entries.append((file, offset, data)) |
|
161 | 161 | self.map[file] = len(self.entries) - 1 |
|
162 | 162 | # add enough data to the journal to do the truncate |
|
163 | 163 | self.file.write("%s\0%d\n" % (file, offset)) |
|
164 | 164 | self.file.flush() |
|
165 | 165 | |
|
166 | 166 | @active |
|
167 | 167 | def addbackup(self, file, hardlink=True, vfs=None): |
|
168 | 168 | """Adds a backup of the file to the transaction |
|
169 | 169 | |
|
170 | 170 | Calling addbackup() creates a hardlink backup of the specified file |
|
171 | 171 | that is used to recover the file in the event of the transaction |
|
172 | 172 | aborting. |
|
173 | 173 | |
|
174 | 174 | * `file`: the file path, relative to .hg/store |
|
175 | 175 | * `hardlink`: use a hardlink to quickly create the backup |
|
176 | 176 | """ |
|
177 | 177 | if self._queue: |
|
178 | 178 | msg = 'cannot use transaction.addbackup inside "group"' |
|
179 | 179 | raise RuntimeError(msg) |
|
180 | 180 | |
|
181 | 181 | if file in self.map or file in self._backupmap: |
|
182 | 182 | return |
|
183 | 183 | backupfile = "%s.backup.%s" % (self.journal, file) |
|
184 | 184 | if vfs is None: |
|
185 | 185 | vfs = self.opener |
|
186 | 186 | if vfs.exists(file): |
|
187 | 187 | filepath = vfs.join(file) |
|
188 | 188 | backuppath = self.opener.join(backupfile) |
|
189 | 189 | util.copyfiles(filepath, backuppath, hardlink=hardlink) |
|
190 | 190 | else: |
|
191 | 191 | backupfile = '' |
|
192 | 192 | |
|
193 | 193 | self._backupentries.append((file, backupfile)) |
|
194 | 194 | self._backupmap[file] = len(self._backupentries) - 1 |
|
195 | 195 | self._backupsfile.write("%s\0%s\n" % (file, backupfile)) |
|
196 | 196 | self._backupsfile.flush() |
|
197 | 197 | |
|
198 | 198 | @active |
|
199 | 199 | def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None): |
|
200 | 200 | """add a function to generate some files at transaction commit |
|
201 | 201 | |
|
202 | 202 | The `genfunc` argument is a function capable of generating proper |
|
203 | 203 | content of each entry in the `filenames` tuple. |
|
204 | 204 | |
|
205 | 205 | At transaction close time, `genfunc` will be called with one file |
|
206 | 206 | object argument per entry in `filenames`. |
|
207 | 207 | |
|
208 | 208 | The transaction itself is responsible for the backup, creation and |
|
209 | 209 | final write of such file. |
|
210 | 210 | |
|
211 | 211 | The `genid` argument is used to ensure the same set of files is only |
|
212 | 212 | generated once. A call to `addfilegenerator` for a `genid` already |
|
213 | 213 | present will overwrite the old entry. |
|
214 | 214 | |
|
215 | 215 | The `order` argument may be used to control the order in which multiple |
|
216 | 216 | generators will be executed. |
|
217 | 217 | """ |
|
218 | 218 | # For now, we are unable to do proper backup and restore of custom vfs |
|
219 | 219 | # except for bookmarks, which are handled outside this mechanism. |
|
220 | 220 | assert vfs is None or filenames == ('bookmarks',) |
|
221 | 221 | self._filegenerators[genid] = (order, filenames, genfunc, vfs) |
|
222 | 222 | |
|
223 | 223 | def _generatefiles(self): |
|
224 | 224 | # write files registered for generation |
|
225 | 225 | for entry in sorted(self._filegenerators.values()): |
|
226 | 226 | order, filenames, genfunc, vfs = entry |
|
227 | 227 | if vfs is None: |
|
228 | 228 | vfs = self.opener |
|
229 | 229 | files = [] |
|
230 | 230 | try: |
|
231 | 231 | for name in filenames: |
|
232 | 232 | # Some files are already backed up when creating the |
|
233 | 233 | # localrepo. Until this is properly fixed we disable the |
|
234 | 234 | # backup for them. |
|
235 | 235 | if name not in ('phaseroots', 'bookmarks'): |
|
236 | 236 | self.addbackup(name) |
|
237 | 237 | files.append(vfs(name, 'w', atomictemp=True)) |
|
238 | 238 | genfunc(*files) |
|
239 | 239 | finally: |
|
240 | 240 | for f in files: |
|
241 | 241 | f.close() |
|
242 | 242 | |
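_generatefiles runs the registered generators sorted by their order value, and registering the same genid again replaces the earlier entry. A small stand-in registry illustrating that behaviour (all names here are made up):

    class GeneratorRegistry(object):
        def __init__(self):
            self._gen = {}

        def register(self, genid, filenames, genfunc, order=0):
            self._gen[genid] = (order, filenames, genfunc)   # re-register replaces

        def run(self, results):
            for order, filenames, genfunc in sorted(self._gen.values(),
                                                    key=lambda e: e[0]):
                genfunc(results, filenames)

    out = {}
    reg = GeneratorRegistry()
    reg.register('bookmarks', ('bookmarks',),
                 lambda r, names: r.update((n, 'bookmark data') for n in names),
                 order=1)
    reg.register('phases', ('phaseroots',),
                 lambda r, names: r.update((n, 'phase data') for n in names),
                 order=0)
    reg.run(out)
    assert out == {'phaseroots': 'phase data', 'bookmarks': 'bookmark data'}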
|
243 | 243 | @active |
|
244 | 244 | def find(self, file): |
|
245 | 245 | if file in self.map: |
|
246 | 246 | return self.entries[self.map[file]] |
|
247 | 247 | if file in self._backupmap: |
|
248 | 248 | return self._backupentries[self._backupmap[file]] |
|
249 | 249 | return None |
|
250 | 250 | |
|
251 | 251 | @active |
|
252 | 252 | def replace(self, file, offset, data=None): |
|
253 | 253 | ''' |
|
254 | 254 | replace can only replace already committed entries |
|
255 | 255 | that are not pending in the queue |
|
256 | 256 | ''' |
|
257 | 257 | |
|
258 | 258 | if file not in self.map: |
|
259 | 259 | raise KeyError(file) |
|
260 | 260 | index = self.map[file] |
|
261 | 261 | self.entries[index] = (file, offset, data) |
|
262 | 262 | self.file.write("%s\0%d\n" % (file, offset)) |
|
263 | 263 | self.file.flush() |
|
264 | 264 | |
|
265 | 265 | @active |
|
266 | 266 | def nest(self): |
|
267 | 267 | self.count += 1 |
|
268 | 268 | self.usages += 1 |
|
269 | 269 | return self |
|
270 | 270 | |
|
271 | 271 | def release(self): |
|
272 | 272 | if self.count > 0: |
|
273 | 273 | self.usages -= 1 |
|
274 | 274 | # if the transaction scopes are left without being closed, fail |
|
275 | 275 | if self.count > 0 and self.usages == 0: |
|
276 | 276 | self._abort() |
|
277 | 277 | |
|
278 | 278 | def running(self): |
|
279 | 279 | return self.count > 0 |
|
280 | 280 | |
|
281 | 281 | def addpending(self, category, callback): |
|
282 | 282 | """add a callback to be called when the transaction is pending |
|
283 | 283 | |
|
284 | The transaction will be given as callback's first argument. | |
|
285 | ||
|
284 | 286 | Category is a unique identifier to allow overwriting an old callback |
|
285 | 287 | with a newer callback. |
|
286 | 288 | """ |
|
287 | 289 | self._pendingcallback[category] = callback |
|
288 | 290 | |
|
289 | 291 | @active |
|
290 | 292 | def writepending(self): |
|
291 | 293 | '''write pending file to temporary version |
|
292 | 294 | |
|
293 | 295 | This is used to allow hooks to view a transaction before commit''' |
|
294 | 296 | categories = sorted(self._pendingcallback) |
|
295 | 297 | for cat in categories: |
|
296 | 298 | # remove callback since the data will have been flushed |
|
297 | any = self._pendingcallback.pop(cat)() | |
|
299 | any = self._pendingcallback.pop(cat)(self) | |
|
298 | 300 | self._anypending = self._anypending or any |
|
299 | 301 | return self._anypending |
|
300 | 302 | |
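With this change, writepending invokes each registered callback with the transaction itself, so a callback can consult transaction state such as hookargs while flushing. A hypothetical callback showing the new signature (not hg code):

    def pending_bookmarks(tr):
        print('flushing pending data; hook args: %r' % sorted(tr.hookargs))
        return True   # tells writepending() that pending data was written

    # registration mirrors the API shown above:
    #   tr.addpending('bookmarks', pending_bookmarks)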
|
301 | 303 | @active |
|
302 | 304 | def addfinalize(self, category, callback): |
|
303 | 305 | """add a callback to be called when the transaction is closed |
|
304 | 306 | |
|
305 | 307 | Category is a unique identifier to allow overwriting old callbacks with |
|
306 | 308 | newer callbacks. |
|
307 | 309 | """ |
|
308 | 310 | self._finalizecallback[category] = callback |
|
309 | 311 | |
|
310 | 312 | @active |
|
311 | 313 | def addpostclose(self, category, callback): |
|
312 | 314 | """add a callback to be called after the transaction is closed |
|
313 | 315 | |
|
314 | 316 | Category is a unique identifier to allow overwriting an old callback |
|
315 | 317 | with a newer callback. |
|
316 | 318 | """ |
|
317 | 319 | self._postclosecallback[category] = callback |
|
318 | 320 | |
|
319 | 321 | @active |
|
320 | 322 | def close(self): |
|
321 | 323 | '''commit the transaction''' |
|
322 | 324 | if self.count == 1 and self.onclose is not None: |
|
323 | 325 | self._generatefiles() |
|
324 | 326 | categories = sorted(self._finalizecallback) |
|
325 | 327 | for cat in categories: |
|
326 | 328 | self._finalizecallback[cat]() |
|
327 | 329 | self.onclose() |
|
328 | 330 | |
|
329 | 331 | self.count -= 1 |
|
330 | 332 | if self.count != 0: |
|
331 | 333 | return |
|
332 | 334 | self.file.close() |
|
333 | 335 | self._backupsfile.close() |
|
334 | 336 | self.entries = [] |
|
335 | 337 | if self.after: |
|
336 | 338 | self.after() |
|
337 | 339 | if self.opener.isfile(self.journal): |
|
338 | 340 | self.opener.unlink(self.journal) |
|
339 | 341 | if self.opener.isfile(self._backupjournal): |
|
340 | 342 | self.opener.unlink(self._backupjournal) |
|
341 | 343 | for _f, b in self._backupentries: |
|
342 | 344 | if b: |
|
343 | 345 | self.opener.unlink(b) |
|
344 | 346 | self._backupentries = [] |
|
345 | 347 | self.journal = None |
|
346 | 348 | # run post close action |
|
347 | 349 | categories = sorted(self._postclosecallback) |
|
348 | 350 | for cat in categories: |
|
349 | 351 | self._postclosecallback[cat]() |
|
350 | 352 | |
|
351 | 353 | @active |
|
352 | 354 | def abort(self): |
|
353 | 355 | '''abort the transaction (generally called on error, or when the |
|
354 | 356 | transaction is not explicitly committed before going out of |
|
355 | 357 | scope)''' |
|
356 | 358 | self._abort() |
|
357 | 359 | |
|
358 | 360 | def _abort(self): |
|
359 | 361 | self.count = 0 |
|
360 | 362 | self.usages = 0 |
|
361 | 363 | self.file.close() |
|
362 | 364 | self._backupsfile.close() |
|
363 | 365 | |
|
364 | 366 | if self.onabort is not None: |
|
365 | 367 | self.onabort() |
|
366 | 368 | |
|
367 | 369 | try: |
|
368 | 370 | if not self.entries and not self._backupentries: |
|
369 | 371 | if self.journal: |
|
370 | 372 | self.opener.unlink(self.journal) |
|
371 | 373 | if self._backupjournal: |
|
372 | 374 | self.opener.unlink(self._backupjournal) |
|
373 | 375 | return |
|
374 | 376 | |
|
375 | 377 | self.report(_("transaction abort!\n")) |
|
376 | 378 | |
|
377 | 379 | try: |
|
378 | 380 | _playback(self.journal, self.report, self.opener, |
|
379 | 381 | self.entries, self._backupentries, False) |
|
380 | 382 | self.report(_("rollback completed\n")) |
|
381 | 383 | except Exception: |
|
382 | 384 | self.report(_("rollback failed - please run hg recover\n")) |
|
383 | 385 | finally: |
|
384 | 386 | self.journal = None |
|
385 | 387 | |
|
386 | 388 | |
|
387 | 389 | def rollback(opener, file, report): |
|
388 | 390 | """Rolls back the transaction contained in the given file |
|
389 | 391 | |
|
390 | 392 | Reads the entries in the specified file, and the corresponding |
|
391 | 393 | '*.backupfiles' file, to recover from an incomplete transaction. |
|
392 | 394 | |
|
393 | 395 | * `file`: a file containing a list of entries, specifying where |
|
394 | 396 | to truncate each file. The file should contain a list of |
|
395 | 397 | file\0offset pairs, delimited by newlines. The corresponding |
|
396 | 398 | '*.backupfiles' file should contain a list of file\0backupfile |
|
397 | 399 | pairs, delimited by \0. |
|
398 | 400 | """ |
|
399 | 401 | entries = [] |
|
400 | 402 | backupentries = [] |
|
401 | 403 | |
|
402 | 404 | fp = opener.open(file) |
|
403 | 405 | lines = fp.readlines() |
|
404 | 406 | fp.close() |
|
405 | 407 | for l in lines: |
|
406 | 408 | try: |
|
407 | 409 | f, o = l.split('\0') |
|
408 | 410 | entries.append((f, int(o), None)) |
|
409 | 411 | except ValueError: |
|
410 | 412 | report(_("couldn't read journal entry %r!\n") % l) |
|
411 | 413 | |
|
412 | 414 | backupjournal = "%s.backupfiles" % file |
|
413 | 415 | if opener.exists(backupjournal): |
|
414 | 416 | fp = opener.open(backupjournal) |
|
415 | 417 | lines = fp.readlines() |
|
416 | 418 | if lines: |
|
417 | 419 | ver = lines[0][:-1] |
|
418 | 420 | if ver == str(version): |
|
419 | 421 | for line in lines[1:]: |
|
420 | 422 | if line: |
|
421 | 423 | # Shave off the trailing newline |
|
422 | 424 | line = line[:-1] |
|
423 | 425 | f, b = line.split('\0') |
|
424 | 426 | backupentries.append((f, b)) |
|
425 | 427 | else: |
|
426 | 428 | report(_("journal was created by a newer version of " |
|
427 | 429 | "Mercurial")) |
|
428 | 430 | |
|
429 | 431 | _playback(file, report, opener, entries, backupentries) |