@@ -105,6 +105,11 @@ def _playback(
     unlink=True,
     checkambigfiles=None,
 ):
+    """rollback a transaction :
+    - truncate files that have been appended to
+    - restore file backups
+    - delete temporary files
+    """
     backupfiles = []
 
     def restore_one_backup(vfs, f, b, checkambig):
@@ -118,7 +123,30 @@ def _playback(
             report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
             raise
 
+    # gather all backup files that impact the store
+    # (we need this to detect files that are both backed up and truncated)
+    store_backup = {}
+    for entry in backupentries:
+        location, file_path, backup_path, cache = entry
+        vfs = vfsmap[location]
+        is_store = vfs.join(b'') == opener.join(b'')
+        if is_store and file_path and backup_path:
+            store_backup[file_path] = entry
+    copy_done = set()
+
+    # truncate all file `f` to offset `o`
     for f, o in sorted(dict(entries).items()):
+        # if we have a backup for `f`, we should restore it first and truncate
+        # the restored file
+        bck_entry = store_backup.get(f)
+        if bck_entry is not None:
+            location, file_path, backup_path, cache = bck_entry
+            checkambig = False
+            if checkambigfiles:
+                checkambig = (file_path, location) in checkambigfiles
+            restore_one_backup(opener, file_path, backup_path, checkambig)
+            copy_done.add(bck_entry)
+        # truncate the file to its pre-transaction size
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
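The hunk above changes the rollback order for store files: when a file has both a recorded truncation offset and a backup, the backup is restored first and the truncation is then applied to the restored content. Below is a minimal standalone sketch of that ordering, assuming plain os/shutil file operations and a hypothetical rollback_one helper rather than Mercurial's vfs layer:

import os
import shutil


def rollback_one(path, backup_path, offset):
    """Illustration only: restore a file's backup (if any) before truncating
    it back to the offset recorded at transaction start."""
    if backup_path is not None and os.path.exists(backup_path):
        # restore the backed-up content first
        shutil.copyfile(backup_path, path)
    if offset:
        # then cut the (restored) file back to its recorded size
        with open(path, 'r+b') as fp:
            fp.truncate(offset)
    else:
        # the file did not exist before the transaction: drop it
        try:
            os.unlink(path)
        except FileNotFoundError:
            pass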
@@ -137,12 +165,16 @@ def _playback(
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
+            # delete empty file
             try:
                 opener.unlink(f)
             except FileNotFoundError:
                 pass
-
-    for l, f, b, c in backupentries:
+    # restore backed up files and clean up temporary files
+    for entry in backupentries:
+        if entry in copy_done:
+            continue
+        l, f, b, c = entry
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
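The copy_done set is what keeps the second loop above correct: a backup that was already restored during the truncation pass must not be restored again afterwards, or it would overwrite the freshly truncated file with the longer backup content. A rough sketch of the two-pass structure, with restore_backup, truncate and cleanup as assumed callbacks rather than real Mercurial APIs:

def playback_sketch(entries, backupentries, restore_backup, truncate, cleanup):
    """Illustrative two-pass rollback; not Mercurial code."""
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        if file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # pass 1: restore the backup (if any) before truncating appended files
    for f, offset in sorted(entries):
        entry = store_backup.get(f)
        if entry is not None:
            restore_backup(entry)
            copy_done.add(entry)
        truncate(f, offset)

    # pass 2: process every remaining backup entry exactly once
    for entry in backupentries:
        if entry in copy_done:
            continue
        restore_backup(entry)
    cleanup()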
@@ -170,6 +202,7 @@ def _playback(
             if not c:
                 raise
 
+    # cleanup transaction state file and the backups file
     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
@@ -346,7 +379,7 @@ class transaction(util.transactional):
         self._file.flush()
 
     @active
-    def addbackup(self, file, hardlink=True, location=b''):
+    def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
         """Adds a backup of the file to the transaction
 
         Calling addbackup() creates a hardlink backup of the specified file
@@ -355,17 +388,25 @@ class transaction(util.transactional):
 
         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
+
+        If `for_offset` is set, we expect a offset for this file to have been previously recorded
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)
 
-        if (
-            file in self._newfiles
-            or file in self._offsetmap
-            or file in self._backupmap
-        ):
-            return
+        if file in self._newfiles or file in self._backupmap:
+            return
+        elif file in self._offsetmap and not for_offset:
+            return
+        elif for_offset and file not in self._offsetmap:
+            msg = (
+                'calling `addbackup` with `for_offmap=True`, '
+                'but no offset recorded: [%r] %r'
+            )
+            msg %= (location, file)
+            raise error.ProgrammingError(msg)
+
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
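With the new for_offset flag, addbackup() keeps its default behaviour (it stays a no-op for files that already have a recorded offset, since truncating to that offset is enough to roll them back), but a caller can now explicitly request a backup for such a file, and passing for_offset=True without a recorded offset is reported as a programming error. A toy model of just those guards; the class, its attributes and the 00changelog.i file name are illustrative simplifications, not the real transaction object:

class ToyTransaction:
    """Toy model of the addbackup() guards shown above; illustration only."""

    def __init__(self):
        self._newfiles = set()
        self._backupmap = {}
        self._offsetmap = {}

    def add(self, file, offset):
        # record the pre-transaction size of a file that will be appended to
        self._offsetmap[file] = offset

    def addbackup(self, file, for_offset=False):
        if file in self._newfiles or file in self._backupmap:
            return
        elif file in self._offsetmap and not for_offset:
            # truncation to the recorded offset is enough to roll back
            return
        elif for_offset and file not in self._offsetmap:
            raise RuntimeError('no offset recorded for %r' % file)
        # in the real code this is where the backup file gets created
        self._backupmap[file] = file + '.backup'


tr = ToyTransaction()
tr.add('00changelog.i', 1024)                    # appended-to file, offset recorded
tr.addbackup('00changelog.i')                    # no-op: the offset already covers it
tr.addbackup('00changelog.i', for_offset=True)   # explicitly back it up as well
assert '00changelog.i' in tr._backupmap

Keeping the default a no-op preserves existing callers; the explicit flag appears intended for code that appends to a file but wants a full copy as well, for cases where truncation alone would not be a safe rollback.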