@@ -1,764 +1,768 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno

 from .i18n import _
 from . import (
     error,
     pycompat,
     util,
 )
 from .utils import stringutil

 version = 2

 # These are the file generators that should only be executed after the
 # finalizers are done, since they rely on the output of the finalizers (like
 # the changelog having been written).
 postfinalizegenerators = {b'bookmarks', b'dirstate'}

 GEN_GROUP_ALL = b'all'
 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
 GEN_GROUP_POST_FINALIZE = b'postfinalize'


 def active(func):
     def _active(self, *args, **kwds):
         if self._count == 0:
             raise error.ProgrammingError(
                 b'cannot use transaction when it is already committed/aborted'
             )
         return func(self, *args, **kwds)

     return _active


 def _playback(
     journal,
     report,
     opener,
     vfsmap,
     entries,
     backupentries,
     unlink=True,
     checkambigfiles=None,
 ):
     for f, o in sorted(dict(entries).items()):
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
                 fp = opener(f, b'a', checkambig=checkambig)
                 if fp.tell() < o:
                     raise error.Abort(
                         _(
                             b"attempted to truncate %s to %d bytes, but it was "
                             b"already %d bytes\n"
                         )
                         % (f, o, fp.tell())
                     )
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError) as inst:
                 if inst.errno != errno.ENOENT:
                     raise

     backupfiles = []
     for l, f, b, c in backupentries:
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
         try:
             if f and b:
                 filepath = vfs.join(f)
                 backuppath = vfs.join(b)
                 checkambig = checkambigfiles and (f, l) in checkambigfiles
                 try:
                     util.copyfile(backuppath, filepath, checkambig=checkambig)
                     backupfiles.append(b)
                 except IOError as exc:
                     e_msg = stringutil.forcebytestr(exc)
                     report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
             else:
                 target = f or b
                 try:
                     vfs.unlink(target)
                 except (IOError, OSError) as inst:
                     if inst.errno != errno.ENOENT:
                         raise
         except (IOError, OSError, error.Abort):
             if not c:
                 raise

     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     opener.unlink(journal)
     try:
         for f in backupfiles:
             if opener.exists(f):
                 opener.unlink(f)
     except (IOError, OSError, error.Abort):
         # only pure backup file remains, it is sage to ignore any error
         pass


 class transaction(util.transactional):
     def __init__(
         self,
         report,
         opener,
         vfsmap,
         journalname,
         undoname=None,
         after=None,
         createmode=None,
         validator=None,
         releasefn=None,
         checkambigfiles=None,
         name='<unnamed>',
     ):
         """Begin a new transaction

         Begins a new transaction that allows rolling back writes in the event of
         an exception.

         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `releasefn`: called after releasing (with transaction and result)

         `checkambigfiles` is a set of (path, vfs-location) tuples,
         which determine whether file stat ambiguity should be avoided
         for corresponded files.
         """
         self._count = 1
         self._usages = 1
         self._report = report
         # a vfs to the store content
         self._opener = opener
         # a map to access file in various {location -> vfs}
         vfsmap = vfsmap.copy()
         vfsmap[b''] = opener  # set default value
         self._vfsmap = vfsmap
         self._after = after
         self._offsetmap = {}
         self._newfiles = set()
         self._journal = journalname
         self._undoname = undoname
         self._queue = []
         # A callback to do something just after releasing transaction.
         if releasefn is None:
             releasefn = lambda tr, success: None
         self._releasefn = releasefn

         self._checkambigfiles = set()
         if checkambigfiles:
             self._checkambigfiles.update(checkambigfiles)

         self._names = [name]

         # A dict dedicated to precisely tracking the changes introduced in the
         # transaction.
         self.changes = {}

         # a dict of arguments to be passed to hooks
         self.hookargs = {}
         self._file = opener.open(self._journal, b"w+")

         # a list of ('location', 'path', 'backuppath', cache) entries.
         # - if 'backuppath' is empty, no file existed at backup time
         # - if 'path' is empty, this is a temporary transaction file
         # - if 'location' is not empty, the path is outside main opener reach.
         #   use 'location' value as a key in a vfsmap to find the right 'vfs'
         # (cache is currently unused)
         self._backupentries = []
         self._backupmap = {}
         self._backupjournal = b"%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, b'w')
         self._backupsfile.write(b'%d\n' % version)

         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
             opener.chmod(self._backupjournal, createmode & 0o666)

         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callback to write pending data for hooks
         self._pendingcallback = {}
         # True is any pending data have been written ever
         self._anypending = False
         # holds callback to call when writing the transaction
         self._finalizecallback = {}
         # holds callback to call when validating the transaction
         # should raise exception if anything is wrong
         self._validatecallback = {}
         if validator is not None:
             self._validatecallback[b'001-userhooks'] = validator
         # hold callback for post transaction close
         self._postclosecallback = {}
         # holds callbacks to call during abort
         self._abortcallback = {}

     def __repr__(self):
         name = '/'.join(self._names)
         return '<transaction name=%s, count=%d, usages=%d>' % (
             name,
             self._count,
             self._usages,
         )

     def __del__(self):
         if self._journal:
             self._abort()

+    @property
+    def finalized(self):
+        return self._finalizecallback is None
+
     @active
     def startgroup(self):
         """delay registration of file entry

         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         self._queue.append([])

     @active
     def endgroup(self):
         """apply delayed registration of file entry.

         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         q = self._queue.pop()
         for f, o in q:
             self._addentry(f, o)

     @active
     def add(self, file, offset):
         """record the state of an append-only file before update"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if self._queue:
             self._queue[-1].append((file, offset))
             return

         self._addentry(file, offset)

     def _addentry(self, file, offset):
         """add a append-only entry to memory and on-disk state"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if offset:
             self._offsetmap[file] = offset
         else:
             self._newfiles.add(file)
         # add enough data to the journal to do the truncate
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()

     @active
     def addbackup(self, file, hardlink=True, location=b''):
         """Adds a backup of the file to the transaction

         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.

         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)

         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = vfs.join(backupfile)
             util.copyfile(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = b''

         self._addbackupentry((location, file, backupfile, False))

     def _addbackupentry(self, entry):
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[entry[1]] = len(self._backupentries) - 1
         self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
         self._backupsfile.flush()

     @active
     def registertmp(self, tmpfile, location=b''):
         """register a temporary transaction file

         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
         self._addbackupentry((location, b'', tmpfile, False))

     @active
     def addfilegenerator(
         self, genid, filenames, genfunc, order=0, location=b''
     ):
         """add a function to generates some files at transaction commit

         The `genfunc` argument is a function capable of generating proper
         content of each entry in the `filename` tuple.

         At transaction close time, `genfunc` will be called with one file
         object argument per entries in `filenames`.

         The transaction itself is responsible for the backup, creation and
         final write of such file.

         The `genid` argument is used to ensure the same set of file is only
         generated once. Call to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.

         The `order` argument may be used to control the order in which multiple
         generator will be executed.

         The `location` arguments may be used to indicate the files are located
         outside of the the standard directory for transaction. It should match
         one of the key of the `transaction.vfsmap` dictionary.
         """
         # For now, we are unable to do proper backup and restore of custom vfs
         # but for bookmarks that are handled outside this mechanism.
         self._filegenerators[genid] = (order, filenames, genfunc, location)

     @active
     def removefilegenerator(self, genid):
         """reverse of addfilegenerator, remove a file generator function"""
         if genid in self._filegenerators:
             del self._filegenerators[genid]

     def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
         # write files registered for generation
         any = False

         if group == GEN_GROUP_ALL:
             skip_post = skip_pre = False
         else:
             skip_pre = group == GEN_GROUP_POST_FINALIZE
             skip_post = group == GEN_GROUP_PRE_FINALIZE

         for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
             any = True
             order, filenames, genfunc, location = entry

             # for generation at closing, check if it's before or after finalize
             is_post = id in postfinalizegenerators
             if skip_post and is_post:
                 continue
             elif skip_pre and not is_post:
                 continue

             vfs = self._vfsmap[location]
             files = []
             try:
                 for name in filenames:
                     name += suffix
                     if suffix:
                         self.registertmp(name, location=location)
                         checkambig = False
                     else:
                         self.addbackup(name, location=location)
                         checkambig = (name, location) in self._checkambigfiles
                     files.append(
                         vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                     )
                 genfunc(*files)
                 for f in files:
                     f.close()
                 # skip discard() loop since we're sure no open file remains
                 del files[:]
             finally:
                 for f in files:
                     f.discard()
         return any

     @active
     def findoffset(self, file):
         if file in self._newfiles:
             return 0
         return self._offsetmap.get(file)

     @active
     def readjournal(self):
         self._file.seek(0)
         entries = []
         for l in self._file.readlines():
             file, troffset = l.split(b'\0')
             entries.append((file, int(troffset)))
         return entries

     @active
     def replace(self, file, offset):
         """
         replace can only replace already committed entries
         that are not pending in the queue
         """
         if file in self._newfiles:
             if not offset:
                 return
             self._newfiles.remove(file)
             self._offsetmap[file] = offset
         elif file in self._offsetmap:
             if not offset:
                 del self._offsetmap[file]
                 self._newfiles.add(file)
             else:
                 self._offsetmap[file] = offset
         else:
             raise KeyError(file)
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()

     @active
     def nest(self, name='<unnamed>'):
         self._count += 1
         self._usages += 1
         self._names.append(name)
         return self

     def release(self):
         if self._count > 0:
             self._usages -= 1
         if self._names:
             self._names.pop()
         # if the transaction scopes are left without being closed, fail
         if self._count > 0 and self._usages == 0:
             self._abort()

     def running(self):
         return self._count > 0

     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._pendingcallback[category] = callback

     @active
     def writepending(self):
         """write pending file to temporary version

         This is used to allow hooks to view a transaction before commit"""
         categories = sorted(self._pendingcallback)
         for cat in categories:
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)(self)
             self._anypending = self._anypending or any
         self._anypending |= self._generatefiles(suffix=b'.pending')
         return self._anypending

     @active
     def hasfinalize(self, category):
         """check is a callback already exist for a category"""
         return category in self._finalizecallback

     @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting old callbacks with
         newer callbacks.
         """
         self._finalizecallback[category] = callback

     @active
     def addpostclose(self, category, callback):
         """add or replace a callback to be called after the transaction closed

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._postclosecallback[category] = callback

     @active
     def getpostclose(self, category):
         """return a postclose callback added before, or None"""
         return self._postclosecallback.get(category, None)

     @active
     def addabort(self, category, callback):
         """add a callback to be called when the transaction is aborted.

         The transaction will be given as the first argument to the callback.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._abortcallback[category] = callback

     @active
     def addvalidator(self, category, callback):
         """adds a callback to be called when validating the transaction.

         The transaction will be given as the first argument to the callback.

         callback should raise exception if to abort transaction"""
         self._validatecallback[category] = callback

     @active
     def close(self):
         '''commit the transaction'''
         if self._count == 1:
             for category in sorted(self._validatecallback):
                 self._validatecallback[category](self)
             self._validatecallback = None  # Help prevent cycles.
             self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
             while self._finalizecallback:
                 callbacks = self._finalizecallback
                 self._finalizecallback = {}
                 categories = sorted(callbacks)
                 for cat in categories:
                     callbacks[cat](self)
             # Prevent double usage and help clear cycles.
             self._finalizecallback = None
             self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

         self._count -= 1
         if self._count != 0:
             return
         self._file.close()
         self._backupsfile.close()
         # cleanup temporary files
         for l, f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if not f and b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raise by read only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._offsetmap = {}
         self._newfiles = set()
         self._writeundo()
         if self._after:
             self._after()
             self._after = None  # Help prevent cycles.
         if self._opener.isfile(self._backupjournal):
             self._opener.unlink(self._backupjournal)
         if self._opener.isfile(self._journal):
             self._opener.unlink(self._journal)
         for l, _f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location"
                     b"%s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raise by read only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._backupentries = []
         self._journal = None

         self._releasefn(self, True)  # notify success of closing transaction
         self._releasefn = None  # Help prevent cycles.

         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat](self)
         # Prevent double usage and help clear cycles.
         self._postclosecallback = None

     @active
     def abort(self):
         """abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)"""
         self._abort()

     def _writeundo(self):
         """write transaction data for possible future undo call"""
         if self._undoname is None:
             return

         undo_backup_path = b"%s.backupfiles" % self._undoname
         undobackupfile = self._opener.open(undo_backup_path, b'w')
         undobackupfile.write(b'%d\n' % version)
         for l, f, b, c in self._backupentries:
             if not f:  # temporary file
                 continue
             if not b:
                 u = b''
             else:
                 if l not in self._vfsmap and c:
                     self._report(
                         b"couldn't remove %s: unknown cache location"
                         b"%s\n" % (b, l)
                     )
                     continue
                 vfs = self._vfsmap[l]
                 base, name = vfs.split(b)
                 assert name.startswith(self._journal), name
                 uname = name.replace(self._journal, self._undoname, 1)
                 u = vfs.reljoin(base, uname)
                 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
             undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
         undobackupfile.close()

     def _abort(self):
         entries = self.readjournal()
         self._count = 0
         self._usages = 0
         self._file.close()
         self._backupsfile.close()

         try:
             if not entries and not self._backupentries:
                 if self._backupjournal:
                     self._opener.unlink(self._backupjournal)
                 if self._journal:
                     self._opener.unlink(self._journal)
                 return

             self._report(_(b"transaction abort!\n"))

             try:
                 for cat in sorted(self._abortcallback):
                     self._abortcallback[cat](self)
                 # Prevent double usage and help clear cycles.
                 self._abortcallback = None
                 _playback(
                     self._journal,
                     self._report,
                     self._opener,
                     self._vfsmap,
                     entries,
                     self._backupentries,
                     False,
                     checkambigfiles=self._checkambigfiles,
                 )
                 self._report(_(b"rollback completed\n"))
             except BaseException as exc:
                 self._report(_(b"rollback failed - please run hg recover\n"))
                 self._report(
                     _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                 )
         finally:
             self._journal = None
             self._releasefn(self, False)  # notify failure of transaction
             self._releasefn = None  # Help prevent cycles.


 BAD_VERSION_MSG = _(
     b"journal was created by a different version of Mercurial\n"
 )


 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
     """Rolls back the transaction contained in the given file

     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.

     * `file`: a file containing a list of entries, specifying where
     to truncate each file. The file should contain a list of
     file\0offset pairs, delimited by newlines. The corresponding
     '*.backupfiles' file should contain a list of file\0backupfile
     pairs, delimited by \0.

     `checkambigfiles` is a set of (path, vfs-location) tuples,
     which determine whether file stat ambiguity should be avoided at
     restoring corresponded files.
     """
     entries = []
     backupentries = []

     with opener.open(file) as fp:
         lines = fp.readlines()
     for l in lines:
         try:
             f, o = l.split(b'\0')
             entries.append((f, int(o)))
         except ValueError:
             report(
                 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
             )

     backupjournal = b"%s.backupfiles" % file
     if opener.exists(backupjournal):
         fp = opener.open(backupjournal)
         lines = fp.readlines()
         if lines:
             ver = lines[0][:-1]
             if ver != (b'%d' % version):
                 report(BAD_VERSION_MSG)
             else:
                 for line in lines[1:]:
                     if line:
                         # Shave off the trailing newline
                         line = line[:-1]
                         l, f, b, c = line.split(b'\0')
                         backupentries.append((l, f, b, bool(c)))

     _playback(
         file,
         report,
         opener,
         vfsmap,
         entries,
         backupentries,
         checkambigfiles=checkambigfiles,
     )
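
For reviewers who want to exercise the new `finalized` property, here is a minimal usage sketch of the lifecycle this module implements: open a transaction, record the pre-write offset of an append-only file, then either close() to commit or let release() play the journal back. It only relies on the constructor and methods shown in the diff above; the `store_vfs` and `ui` objects, the file name, and the payload are stand-ins for whatever the caller (normally the repository layer) would supply, so treat it as an illustration rather than how the repository actually wires things up.

    # Hypothetical driver code: 'store_vfs' (a vfs rooted at .hg/store) and
    # 'ui' are assumed to be provided by the caller; they are not defined here.
    from mercurial import transaction

    def append_with_rollback(store_vfs, ui, payload):
        tr = transaction.transaction(
            ui.warn,            # report: where abort/rollback messages go
            store_vfs,          # opener: default vfs for journal and entries
            {b'': store_vfs},   # vfsmap: location -> vfs
            b'journal',         # journalname
            undoname=b'undo',
            name='demo',
        )
        try:
            # remember the current size so an abort can truncate back to it
            fp = store_vfs(b'00changelog.i', b'a')
            tr.add(b'00changelog.i', fp.tell())
            fp.write(payload)
            fp.close()
            tr.close()          # commit: journal dropped, undo files written
        finally:
            # if close() was never reached, this plays the journal back
            tr.release()
        return tr.finalized     # the new property: True once finalizers ran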