Show More
@@ -220,7 +220,7 b' def strip(ui, repo, nodelist, backup=Tru' | |||
|
220 | 220 | tr.endgroup() |
|
221 | 221 | |
|
222 | 222 | for i in pycompat.xrange(offset, len(tr._entries)): |
|
223 | file, troffset, ignore = tr._entries[i] |
|
223 | file, troffset = tr._entries[i] | |
|
224 | 224 | with repo.svfs(file, b'a', checkambig=True) as fp: |
|
225 | 225 | fp.truncate(troffset) |
|
226 | 226 | if troffset == 0: |
@@ -2005,16 +2005,9 b' class revlog(object):' | |||
|
2005 | 2005 | raise error.RevlogError( |
|
2006 | 2006 | _(b"%s not found in the transaction") % self.indexfile |
|
2007 | 2007 | ) |
|
2008 | ||
|
2009 | trindex = trinfo[2] |
|
2010 | if trindex is not None: | |
|
2011 | dataoff = self.start(trindex) | |
|
2012 | else: | |
|
2013 | # revlog was stripped at start of transaction, use all leftover data | |
|
2014 | trindex = len(self) - 1 | |
|
2015 | dataoff = self.end(tiprev) | |
|
2016 | ||
|
2017 | tr.add(self.datafile, dataoff) | |
|
2008 | troffset = trinfo[1] | |
|
2009 | trindex = 0 | |
|
2010 | tr.add(self.datafile, 0) | |
|
2018 | 2011 | |
|
2019 | 2012 | if fp: |
|
2020 | 2013 | fp.flush() |
@@ -2026,6 +2019,8 b' class revlog(object):' | |||
|
2026 | 2019 | with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh: |
|
2027 | 2020 | for r in self: |
|
2028 | 2021 | dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1]) |
|
2022 | if troffset <= self.start(r): | |
|
2023 | trindex = r | |
|
2029 | 2024 | |
|
2030 | 2025 | with self._indexfp(b'w') as fp: |
|
2031 | 2026 | self.version &= ~FLAG_INLINE_DATA |
@@ -2361,7 +2356,7 b' class revlog(object):' | |||
|
2361 | 2356 | ifh.write(entry) |
|
2362 | 2357 | else: |
|
2363 | 2358 | offset += curr * self._io.size |
|
2364 | transaction.add(self.indexfile, offset, curr) |
|
2359 | transaction.add(self.indexfile, offset) | |
|
2365 | 2360 | ifh.write(entry) |
|
2366 | 2361 | ifh.write(data[0]) |
|
2367 | 2362 | ifh.write(data[1]) |
@@ -2397,10 +2392,10 b' class revlog(object):' | |||
|
2397 | 2392 | ifh = self._indexfp(b"a+") |
|
2398 | 2393 | isize = r * self._io.size |
|
2399 | 2394 | if self._inline: |
|
2400 | transaction.add(self.indexfile, end + isize, r) |
|
2395 | transaction.add(self.indexfile, end + isize) | |
|
2401 | 2396 | dfh = None |
|
2402 | 2397 | else: |
|
2403 | transaction.add(self.indexfile, isize, r) |
|
2398 | transaction.add(self.indexfile, isize) | |
|
2404 | 2399 | transaction.add(self.datafile, end) |
|
2405 | 2400 | dfh = self._datafp(b"a+") |
|
2406 | 2401 |
@@ -56,7 +56,7 b' def _playback(' | |||
|
56 | 56 | unlink=True, |
|
57 | 57 | checkambigfiles=None, |
|
58 | 58 | ): |
|
59 | for f, o, _ignore in entries: |
|
59 | for f, o in entries: | |
|
60 | 60 | if o or not unlink: |
|
61 | 61 | checkambig = checkambigfiles and (f, b'') in checkambigfiles |
|
62 | 62 | try: |
@@ -243,25 +243,25 b' class transaction(util.transactional):' | |||
|
243 | 243 | This is used by strip to delay vision of strip offset. The transaction |
|
244 | 244 | sees either none or all of the strip actions to be done.""" |
|
245 | 245 | q = self._queue.pop() |
|
246 | for f, o, data in q: |
|
247 | self._addentry(f, o, data) |
|
246 | for f, o in q: | |
|
247 | self._addentry(f, o) | |
|
248 | 248 | |
|
249 | 249 | @active |
|
250 | def add(self, file, offset, data=None): |
|
250 | def add(self, file, offset): | |
|
251 | 251 | """record the state of an append-only file before update""" |
|
252 | 252 | if file in self._map or file in self._backupmap: |
|
253 | 253 | return |
|
254 | 254 | if self._queue: |
|
255 | self._queue[-1].append((file, offset, data)) |
|
255 | self._queue[-1].append((file, offset)) | |
|
256 | 256 | return |
|
257 | 257 | |
|
258 | self._addentry(file, offset, data) |
|
258 | self._addentry(file, offset) | |
|
259 | 259 | |
|
260 | def _addentry(self, file, offset, data): |
|
260 | def _addentry(self, file, offset): | |
|
261 | 261 | """add a append-only entry to memory and on-disk state""" |
|
262 | 262 | if file in self._map or file in self._backupmap: |
|
263 | 263 | return |
|
264 | self._entries.append((file, offset, data)) |
|
264 | self._entries.append((file, offset)) | |
|
265 | 265 | self._map[file] = len(self._entries) - 1 |
|
266 | 266 | # add enough data to the journal to do the truncate |
|
267 | 267 | self._file.write(b"%s\0%d\n" % (file, offset)) |
@@ -403,7 +403,7 b' class transaction(util.transactional):' | |||
|
403 | 403 | return None |
|
404 | 404 | |
|
405 | 405 | @active |
|
406 | def replace(self, file, offset, data=None): |
|
406 | def replace(self, file, offset): | |
|
407 | 407 | ''' |
|
408 | 408 | replace can only replace already committed entries |
|
409 | 409 | that are not pending in the queue |
@@ -412,7 +412,7 b' class transaction(util.transactional):' | |||
|
412 | 412 | if file not in self._map: |
|
413 | 413 | raise KeyError(file) |
|
414 | 414 | index = self._map[file] |
|
415 | self._entries[index] = (file, offset, data) |
|
415 | self._entries[index] = (file, offset) | |
|
416 | 416 | self._file.write(b"%s\0%d\n" % (file, offset)) |
|
417 | 417 | self._file.flush() |
|
418 | 418 | |
@@ -696,7 +696,7 b' def rollback(opener, vfsmap, file, repor' | |||
|
696 | 696 | for l in lines: |
|
697 | 697 | try: |
|
698 | 698 | f, o = l.split(b'\0') |
|
699 | entries.append((f, int(o), None)) |
|
699 | entries.append((f, int(o))) | |
|
700 | 700 | except ValueError: |
|
701 | 701 | report( |
|
702 | 702 | _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l) |
@@ -45,7 +45,7 b' test qpush on empty series' | |||
|
45 | 45 | > # Touching files truncated at "transaction.abort" causes |
|
46 | 46 | > # forcible re-loading invalidated filecache properties |
|
47 | 47 | > # (including repo.changelog) |
|
48 | > for f, o, _ignore in entries: |
|
48 | > for f, o in entries: | |
|
49 | 49 | > if o or not unlink: |
|
50 | 50 | > os.utime(opener.join(f), (0.0, 0.0)) |
|
51 | 51 | > def extsetup(ui): |
General Comments 0
You need to be logged in to leave comments.
Login now