@@ -1,764 +1,764 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 
 from .i18n import _
 from . import (
     error,
     pycompat,
     util,
 )
 from .utils import stringutil
 
 version = 2
 
 # These are the file generators that should only be executed after the
 # finalizers are done, since they rely on the output of the finalizers (like
 # the changelog having been written).
 postfinalizegenerators = {b'bookmarks', b'dirstate'}
 
 GEN_GROUP_ALL = b'all'
 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
 GEN_GROUP_POST_FINALIZE = b'postfinalize'
 
 
 def active(func):
     def _active(self, *args, **kwds):
         if self._count == 0:
             raise error.ProgrammingError(
                 b'cannot use transaction when it is already committed/aborted'
             )
         return func(self, *args, **kwds)
 
     return _active
 
 
 def _playback(
     journal,
     report,
     opener,
     vfsmap,
     entries,
     backupentries,
     unlink=True,
     checkambigfiles=None,
 ):
-    for f, o in entries:
+    for f, o in sorted(dict(entries).items()):
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
                 fp = opener(f, b'a', checkambig=checkambig)
                 if fp.tell() < o:
                     raise error.Abort(
                         _(
                             b"attempted to truncate %s to %d bytes, but it was "
                             b"already %d bytes\n"
                         )
                         % (f, o, fp.tell())
                     )
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError) as inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
     backupfiles = []
     for l, f, b, c in backupentries:
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
         try:
             if f and b:
                 filepath = vfs.join(f)
                 backuppath = vfs.join(b)
                 checkambig = checkambigfiles and (f, l) in checkambigfiles
                 try:
                     util.copyfile(backuppath, filepath, checkambig=checkambig)
                     backupfiles.append(b)
                 except IOError:
                     report(_(b"failed to recover %s\n") % f)
             else:
                 target = f or b
                 try:
                     vfs.unlink(target)
                 except (IOError, OSError) as inst:
                     if inst.errno != errno.ENOENT:
                         raise
         except (IOError, OSError, error.Abort):
             if not c:
                 raise
 
     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     opener.unlink(journal)
     try:
         for f in backupfiles:
             if opener.exists(f):
                 opener.unlink(f)
     except (IOError, OSError, error.Abort):
         # only pure backup files remain; it is safe to ignore any error
         pass
 
 
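The only functional change in this hunk is the loop header at the top of
`_playback`: a journal can legitimately record the same file more than once
(the revlog split exercised by the test below records `data/file.i` and
`data/file.d` twice each), so playback now collapses the journal to one
truncation target per file, keeping the last offset recorded, and replays the
entries in a deterministic order. A minimal sketch of what
`sorted(dict(entries).items())` buys; the offsets mirror the journal dump in
the test below:

    # dict() keeps the last offset recorded for each file, and sorted()
    # makes the replay order deterministic.
    entries = [
        (b'data/file.i', 1174),  # inline revlog, before the split
        (b'data/file.d', 0),     # temporary record during the split
        (b'data/file.d', 1046),  # data file, after the split
        (b'data/file.i', 128),   # index file, after the split
    ]
    deduped = sorted(dict(entries).items())
    assert deduped == [(b'data/file.d', 1046), (b'data/file.i', 128)]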
 class transaction(util.transactional):
     def __init__(
         self,
         report,
         opener,
         vfsmap,
         journalname,
         undoname=None,
         after=None,
         createmode=None,
         validator=None,
         releasefn=None,
         checkambigfiles=None,
         name='<unnamed>',
     ):
         """Begin a new transaction
 
         Begins a new transaction that allows rolling back writes in the event of
         an exception.
 
         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `releasefn`: called after releasing (with transaction and result)
 
         `checkambigfiles` is a set of (path, vfs-location) tuples,
         which determine whether file stat ambiguity should be avoided
         for the corresponding files.
         """
         self._count = 1
         self._usages = 1
         self._report = report
         # a vfs to the store content
         self._opener = opener
         # a map to access files in various locations {location -> vfs}
         vfsmap = vfsmap.copy()
         vfsmap[b''] = opener  # set default value
         self._vfsmap = vfsmap
         self._after = after
         self._offsetmap = {}
         self._newfiles = set()
         self._journal = journalname
         self._undoname = undoname
         self._queue = []
         # A callback to do something just after releasing the transaction.
         if releasefn is None:
             releasefn = lambda tr, success: None
         self._releasefn = releasefn
 
         self._checkambigfiles = set()
         if checkambigfiles:
             self._checkambigfiles.update(checkambigfiles)
 
         self._names = [name]
 
         # A dict dedicated to precisely tracking the changes introduced in the
         # transaction.
         self.changes = {}
 
         # a dict of arguments to be passed to hooks
         self.hookargs = {}
         self._file = opener.open(self._journal, b"w+")
 
         # a list of ('location', 'path', 'backuppath', cache) entries.
         # - if 'backuppath' is empty, no file existed at backup time
         # - if 'path' is empty, this is a temporary transaction file
         # - if 'location' is not empty, the path is outside main opener reach.
         # use 'location' value as a key in a vfsmap to find the right 'vfs'
         # (cache is currently unused)
         self._backupentries = []
         self._backupmap = {}
         self._backupjournal = b"%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, b'w')
         self._backupsfile.write(b'%d\n' % version)
 
         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
             opener.chmod(self._backupjournal, createmode & 0o666)
 
         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callback to write pending data for hooks
         self._pendingcallback = {}
         # True if any pending data has ever been written
         self._anypending = False
         # holds callback to call when writing the transaction
         self._finalizecallback = {}
         # holds callback to call when validating the transaction
         # should raise an exception if anything is wrong
         self._validatecallback = {}
         if validator is not None:
             self._validatecallback[b'001-userhooks'] = validator
         # hold callback for post-transaction close
         self._postclosecallback = {}
         # holds callbacks to call during abort
         self._abortcallback = {}
 
     def __repr__(self):
         name = '/'.join(self._names)
         return '<transaction name=%s, count=%d, usages=%d>' % (
             name,
             self._count,
             self._usages,
         )
 
     def __del__(self):
         if self._journal:
             self._abort()
 
     @active
     def startgroup(self):
         """delay registration of file entry
 
         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         self._queue.append([])
 
     @active
     def endgroup(self):
         """apply delayed registration of file entry.
 
         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         q = self._queue.pop()
         for f, o in q:
             self._addentry(f, o)
 
     @active
     def add(self, file, offset):
         """record the state of an append-only file before update"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if self._queue:
             self._queue[-1].append((file, offset))
             return
 
         self._addentry(file, offset)
 
     def _addentry(self, file, offset):
         """add an append-only entry to memory and on-disk state"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if offset:
             self._offsetmap[file] = offset
         else:
             self._newfiles.add(file)
         # add enough data to the journal to do the truncate
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()
 
     @active
     def addbackup(self, file, hardlink=True, location=b''):
         """Adds a backup of the file to the transaction
 
         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.
 
         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)
 
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = vfs.join(backupfile)
             util.copyfile(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = b''
 
         self._addbackupentry((location, file, backupfile, False))
 
     def _addbackupentry(self, entry):
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[entry[1]] = len(self._backupentries) - 1
         self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
         self._backupsfile.flush()
 
     @active
     def registertmp(self, tmpfile, location=b''):
         """register a temporary transaction file
 
         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
         self._addbackupentry((location, b'', tmpfile, False))
 
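`_addbackupentry` serializes each entry as four NUL-separated fields on one
line; `rollback()` at the bottom of this file parses the same format back. A
round-trip sketch (the backup file name is made up for illustration):

    # One backup entry: (location, path, backup path, cache flag),
    # written as "<l>\0<f>\0<b>\0<c>\n".
    entry = (b'', b'data/file.i', b'journal.backup.file.i', 0)
    line = b"%s\0%s\0%s\0%d\n" % entry
    location, path, backuppath, cache = line[:-1].split(b'\0')
    assert (location, path, backuppath) == entry[:3]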
     @active
     def addfilegenerator(
         self, genid, filenames, genfunc, order=0, location=b''
     ):
         """add a function to generate some files at transaction commit
 
         The `genfunc` argument is a function capable of generating proper
         content of each entry in the `filenames` tuple.
 
         At transaction close time, `genfunc` will be called with one file
         object argument per entry in `filenames`.
 
         The transaction itself is responsible for the backup, creation and
         final write of such files.
 
         The `genid` argument is used to ensure the same set of files is only
         generated once. A call to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.
 
         The `order` argument may be used to control the order in which multiple
         generators will be executed.
 
         The `location` argument may be used to indicate the files are located
         outside of the standard directory for transaction. It should match
         one of the keys of the `transaction.vfsmap` dictionary.
         """
         # For now, we are unable to do proper backup and restore of custom vfs
         # except for bookmarks, which are handled outside this mechanism.
         self._filegenerators[genid] = (order, filenames, genfunc, location)
 
     @active
     def removefilegenerator(self, genid):
         """reverse of addfilegenerator, remove a file generator function"""
         if genid in self._filegenerators:
             del self._filegenerators[genid]
 
     def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
         # write files registered for generation
         any = False
 
         if group == GEN_GROUP_ALL:
             skip_post = skip_pre = False
         else:
             skip_pre = group == GEN_GROUP_POST_FINALIZE
             skip_post = group == GEN_GROUP_PRE_FINALIZE
 
         for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
             any = True
             order, filenames, genfunc, location = entry
 
             # for generation at closing, check if it's before or after finalize
             is_post = id in postfinalizegenerators
             if skip_post and is_post:
                 continue
             elif skip_pre and not is_post:
                 continue
 
             vfs = self._vfsmap[location]
             files = []
             try:
                 for name in filenames:
                     name += suffix
                     if suffix:
                         self.registertmp(name, location=location)
                         checkambig = False
                     else:
                         self.addbackup(name, location=location)
                         checkambig = (name, location) in self._checkambigfiles
                     files.append(
                         vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                     )
                 genfunc(*files)
                 for f in files:
                     f.close()
                 # skip discard() loop since we're sure no open file remains
                 del files[:]
             finally:
                 for f in files:
                     f.discard()
         return any
 
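A hypothetical registration, to make the calling convention concrete,
assuming `tr` is an open transaction (the genid, file name, and writer below
are made up; real users of this API include the bookmark store):

    # At close(), the transaction backs up 'myext-state', opens it
    # atomically, and hands the open file object to the writer.
    def write_state(fp):
        fp.write(b'serialized extension state\n')

    tr.addfilegenerator(b'myext-state', (b'myext-state',), write_state)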
     @active
     def findoffset(self, file):
         if file in self._newfiles:
             return 0
         return self._offsetmap.get(file)
 
     @active
     def readjournal(self):
         self._file.seek(0)
         entries = []
         for l in self._file.readlines():
             file, troffset = l.split(b'\0')
             entries.append((file, int(troffset)))
         return entries
 
     @active
     def replace(self, file, offset):
         """
         replace can only replace already committed entries
         that are not pending in the queue
         """
         if file in self._newfiles:
             if not offset:
                 return
             self._newfiles.remove(file)
             self._offsetmap[file] = offset
         elif file in self._offsetmap:
             if not offset:
                 del self._offsetmap[file]
                 self._newfiles.add(file)
             else:
                 self._offsetmap[file] = offset
         else:
             raise KeyError(file)
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()
 
     @active
     def nest(self, name='<unnamed>'):
         self._count += 1
         self._usages += 1
         self._names.append(name)
         return self
 
     def release(self):
         if self._count > 0:
             self._usages -= 1
         if self._names:
             self._names.pop()
         # if the transaction scopes are left without being closed, fail
         if self._count > 0 and self._usages == 0:
             self._abort()
 
     def running(self):
         return self._count > 0
 
     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending
 
         The transaction will be given as callback's first argument.
 
         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._pendingcallback[category] = callback
 
     @active
     def writepending(self):
         """write pending files to temporary versions
 
         This is used to allow hooks to view a transaction before commit"""
         categories = sorted(self._pendingcallback)
         for cat in categories:
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)(self)
             self._anypending = self._anypending or any
         self._anypending |= self._generatefiles(suffix=b'.pending')
         return self._anypending
 
     @active
     def hasfinalize(self, category):
         """check if a callback already exists for a category"""
         return category in self._finalizecallback
 
     @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed
 
         The transaction will be given as callback's first argument.
 
         Category is a unique identifier to allow overwriting old callbacks with
         newer callbacks.
         """
         self._finalizecallback[category] = callback
 
     @active
     def addpostclose(self, category, callback):
         """add or replace a callback to be called after the transaction closed
 
         The transaction will be given as callback's first argument.
 
         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._postclosecallback[category] = callback
 
     @active
     def getpostclose(self, category):
         """return a postclose callback added before, or None"""
         return self._postclosecallback.get(category, None)
 
     @active
     def addabort(self, category, callback):
         """add a callback to be called when the transaction is aborted.
 
         The transaction will be given as the first argument to the callback.
 
         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._abortcallback[category] = callback
 
     @active
     def addvalidator(self, category, callback):
         """adds a callback to be called when validating the transaction.
 
         The transaction will be given as the first argument to the callback.
 
         The callback should raise an exception to abort the transaction."""
         self._validatecallback[category] = callback
 
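All of these registries are keyed by category, and both validation and
finalization iterate in sorted(category) order, so a category prefix can be
used to sequence callbacks; `__init__` relies on this when it registers the
user validator under b'001-userhooks'. A sketch, assuming `tr` is an open
transaction (the category names are made up):

    # b'000-check-lock' sorts before b'001-userhooks', so this validator
    # would run before the user hooks when close() validates.
    tr.addvalidator(b'000-check-lock', lambda tr: None)
    # Finalizers run in sorted order too, so b'zzz-last' runs last.
    tr.addfinalize(b'zzz-last', lambda tr: None)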
     @active
     def close(self):
         '''commit the transaction'''
         if self._count == 1:
             for category in sorted(self._validatecallback):
                 self._validatecallback[category](self)
             self._validatecallback = None  # Help prevent cycles.
             self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
             while self._finalizecallback:
                 callbacks = self._finalizecallback
                 self._finalizecallback = {}
                 categories = sorted(callbacks)
                 for cat in categories:
                     callbacks[cat](self)
             # Prevent double usage and help clear cycles.
             self._finalizecallback = None
             self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
 
         self._count -= 1
         if self._count != 0:
             return
         self._file.close()
         self._backupsfile.close()
         # cleanup temporary files
         for l, f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if not f and b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._offsetmap = {}
         self._newfiles = set()
         self._writeundo()
         if self._after:
             self._after()
             self._after = None  # Help prevent cycles.
         if self._opener.isfile(self._backupjournal):
             self._opener.unlink(self._backupjournal)
         if self._opener.isfile(self._journal):
             self._opener.unlink(self._journal)
         for l, _f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location "
                     b"%s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._backupentries = []
         self._journal = None
 
         self._releasefn(self, True)  # notify success of closing transaction
         self._releasefn = None  # Help prevent cycles.
 
         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat](self)
         # Prevent double usage and help clear cycles.
         self._postclosecallback = None
 
     @active
     def abort(self):
         """abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)"""
         self._abort()
 
     def _writeundo(self):
         """write transaction data for possible future undo call"""
         if self._undoname is None:
             return
         undobackupfile = self._opener.open(
             b"%s.backupfiles" % self._undoname, b'w'
         )
         undobackupfile.write(b'%d\n' % version)
         for l, f, b, c in self._backupentries:
             if not f:  # temporary file
                 continue
             if not b:
                 u = b''
             else:
                 if l not in self._vfsmap and c:
                     self._report(
                         b"couldn't remove %s: unknown cache location "
                         b"%s\n" % (b, l)
                     )
                     continue
                 vfs = self._vfsmap[l]
                 base, name = vfs.split(b)
                 assert name.startswith(self._journal), name
                 uname = name.replace(self._journal, self._undoname, 1)
                 u = vfs.reljoin(base, uname)
             util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
             undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
         undobackupfile.close()
 
     def _abort(self):
         entries = self.readjournal()
         self._count = 0
         self._usages = 0
         self._file.close()
         self._backupsfile.close()
 
         try:
             if not entries and not self._backupentries:
                 if self._backupjournal:
                     self._opener.unlink(self._backupjournal)
                 if self._journal:
                     self._opener.unlink(self._journal)
                 return
 
             self._report(_(b"transaction abort!\n"))
 
             try:
                 for cat in sorted(self._abortcallback):
                     self._abortcallback[cat](self)
                 # Prevent double usage and help clear cycles.
                 self._abortcallback = None
                 _playback(
                     self._journal,
                     self._report,
                     self._opener,
                     self._vfsmap,
                     entries,
                     self._backupentries,
                     False,
                     checkambigfiles=self._checkambigfiles,
                 )
                 self._report(_(b"rollback completed\n"))
             except BaseException as exc:
                 self._report(_(b"rollback failed - please run hg recover\n"))
                 self._report(
                     _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                 )
         finally:
             self._journal = None
             self._releasefn(self, False)  # notify failure of transaction
             self._releasefn = None  # Help prevent cycles.
 
 
 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
     """Rolls back the transaction contained in the given file
 
     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.
 
     * `file`: a file containing a list of entries, specifying where
       to truncate each file. The file should contain a list of
       file\0offset pairs, delimited by newlines. The corresponding
       '*.backupfiles' file should contain a list of file\0backupfile
       pairs, delimited by \0.
 
     `checkambigfiles` is a set of (path, vfs-location) tuples,
     which determine whether file stat ambiguity should be avoided
     when restoring the corresponding files.
     """
     entries = []
     backupentries = []
 
     fp = opener.open(file)
     lines = fp.readlines()
     fp.close()
     for l in lines:
         try:
             f, o = l.split(b'\0')
             entries.append((f, int(o)))
         except ValueError:
             report(
                 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
             )
 
     backupjournal = b"%s.backupfiles" % file
     if opener.exists(backupjournal):
         fp = opener.open(backupjournal)
         lines = fp.readlines()
         if lines:
             ver = lines[0][:-1]
             if ver == (b'%d' % version):
                 for line in lines[1:]:
                     if line:
                         # Shave off the trailing newline
                         line = line[:-1]
                         l, f, b, c = line.split(b'\0')
                         backupentries.append((l, f, b, bool(c)))
             else:
                 report(
                     _(
                         b"journal was created by a different version of "
                         b"Mercurial\n"
                     )
                 )
 
     _playback(
         file,
         report,
         opener,
         vfsmap,
         entries,
         backupentries,
         checkambigfiles=checkambigfiles,
     )
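To make the on-disk format concrete: each journal line is "<file>\0<offset>\n"
(written by `_addentry` and `replace`), and `rollback()` parses it exactly as
sketched below; the paths and offsets echo the test that follows:

    # Parse a journal the way rollback() does; int() accepts bytes and
    # ignores the trailing newline.
    journal = b"data/file.i\x001174\ndata/file.d\x000\n"
    entries = []
    for l in journal.splitlines(True):
        f, o = l.split(b'\0')
        entries.append((f, int(o)))
    assert entries == [(b'data/file.i', 1174), (b'data/file.d', 0)]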
@@ -1,86 +1,170 @@
 Test correctness of revlog inline -> non-inline transition
 ----------------------------------------------------------
 
 Helper extension to intercept renames.
 
   $ cat > $TESTTMP/intercept_rename.py << EOF
   > import os
   > import sys
   > from mercurial import extensions, util
   >
   > def extsetup(ui):
   >     def close(orig, *args, **kwargs):
   >         path = args[0]._atomictempfile__name
   >         if path.endswith(b'/.hg/store/data/file.i'):
   >             os._exit(80)
   >         return orig(*args, **kwargs)
   >     extensions.wrapfunction(util.atomictempfile, 'close', close)
   > EOF
 
 
 Test offset computation to correctly factor in the index entries themselves.
 Also test that the new data file has the correct size if the transaction is aborted
 after the index has been replaced.
24 |
|
24 | |||
25 | Test repo has one small, one moderate and one big change. The clone has |
|
25 | Test repo has one small, one moderate and one big change. The clone has | |
26 | the small and moderate change and will transition to non-inline storage when |
|
26 | the small and moderate change and will transition to non-inline storage when | |
27 | adding the big change. |
|
27 | adding the big change. | |
28 |
|
28 | |||

  $ hg init troffset-computation --config format.revlog-compression=none
  $ cd troffset-computation
  $ printf '% 20d' '1' > file
  $ hg commit -Aqm_
  $ printf '% 1024d' '1' > file
  $ hg commit -Aqm_
  $ dd if=/dev/zero of=file bs=1k count=128 > /dev/null 2>&1
  $ hg commit -Aqm_
  $ cd ..

  $ hg clone -r 1 troffset-computation troffset-computation-copy --config format.revlog-compression=none -q
  $ cd troffset-computation-copy

Reference size:

  $ f -s .hg/store/data/file*
  .hg/store/data/file.i: size=1174
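
As a rough cross-check of that figure, a sketch assuming 64-byte index
entries and a one-byte b'u' marker in front of each uncompressed chunk, with
both revisions stored as full snapshots:

    # Back-of-the-envelope accounting for the 1174-byte inline revlog.
    index = 2 * 64                # two revisions, 64 bytes per index entry
    data = (1 + 20) + (1 + 1024)  # b'u' marker plus each revision's full text
    assert index + data == 1174

The 1046-byte data portion is exactly what the journal reports for file.d
after the split below.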

  $ cat > .hg/hgrc <<EOF
  > [hooks]
  > pretxnchangegroup = python:$TESTDIR/helper-killhook.py:killme
  > EOF
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  [80]

The first file.i entry should match the size above. The first file.d entry is
the temporary record written during the split; the second one is written after
the split has happened. The sum of the second file.d and the second file.i
entries should match the first file.i entry.

  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
  data/file.i 1174
  data/file.d 0
  data/file.d 1046
  data/file.i 128
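
That sum property can be checked mechanically; a minimal sketch, assuming the
journal holds one path\0offset record per line, as the rollback code above
parses it:

    # Collect truncation offsets per store path from the journal.
    sizes = {}
    with open('.hg/store/journal', 'rb') as fp:
        for line in fp:
            path, offset = line.rstrip(b'\n').split(b'\0')
            sizes.setdefault(path, []).append(int(offset.decode('ascii')))
    # pre-split file.i == post-split file.d + post-split file.i
    assert sizes[b'data/file.i'][0] == (
        sizes[b'data/file.d'][-1] + sizes[b'data/file.i'][-1]
    )  # 1174 == 1046 + 128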
|
  $ hg recover
  rolling back interrupted transaction
  (verify step skipped, run `hg verify` to check your repository content)
  $ f -s .hg/store/data/file*
  .hg/store/data/file.d: size=1046
  .hg/store/data/file.i: size=128
  $ hg tip
  changeset:   1:3ce491143aec
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     _

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  warning: revlog 'data/file.d' not in fncache!
  checked 2 changesets with 2 changes to 1 files
  1 warnings encountered!
  hint: run "hg debugrebuildfncache" to recover from corrupt fncache
  $ cd ..


Now retry the procedure but intercept the rename of the index and check that
the journal does not contain the new index size. This demonstrates the edge
case where the data file is left as garbage.
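
The interception point is the write-then-rename step behind
util.atomictempfile. An illustrative stand-in for that pattern (the real
class also handles file modes and timestamp-ambiguity checks):

    import os

    class atomictempfile_sketch:
        """Write to a temp file, then rename over the target on close()."""

        def __init__(self, name):
            self._name = name
            self._tempname = name + '.tmp'
            self._fp = open(self._tempname, 'wb')

        def write(self, data):
            self._fp.write(data)

        def close(self):
            self._fp.close()
            # The helper extension aborts instead of running the original
            # close(), so this rename never publishes the new file.i.
            os.rename(self._tempname, self._name)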

  $ hg clone -r 1 troffset-computation troffset-computation-copy2 --config format.revlog-compression=none -q
  $ cd troffset-computation-copy2
  $ cat > .hg/hgrc <<EOF
  > [extensions]
  > intercept_rename = $TESTTMP/intercept_rename.py
  > [hooks]
  > pretxnchangegroup = python:$TESTDIR/helper-killhook.py:killme
  > EOF
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  [80]
  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
  data/file.i 1174
  data/file.d 0
  data/file.d 1046
|
  $ hg recover
  rolling back interrupted transaction
  (verify step skipped, run `hg verify` to check your repository content)
  $ f -s .hg/store/data/file*
  .hg/store/data/file.d: size=1046
  .hg/store/data/file.i: size=1174
  $ hg tip
  changeset:   1:3ce491143aec
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     _

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 2 changesets with 2 changes to 1 files
  $ cd ..


Repeat the original test but let hg rollback the transaction.

  $ hg clone -r 1 troffset-computation troffset-computation-copy-rb --config format.revlog-compression=none -q
  $ cd troffset-computation-copy-rb
  $ cat > .hg/hgrc <<EOF
  > [hooks]
  > pretxnchangegroup = false
  > EOF
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  transaction abort!
  rollback completed
  abort: pretxnchangegroup hook exited with status 1
  [40]
  $ f -s .hg/store/data/file*
  .hg/store/data/file.d: size=1046
  .hg/store/data/file.i: size=128
  $ hg tip
  changeset:   1:3ce491143aec
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     _

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  warning: revlog 'data/file.d' not in fncache!
  checked 2 changesets with 2 changes to 1 files
  1 warnings encountered!
  hint: run "hg debugrebuildfncache" to recover from corrupt fncache
  $ cd ..