##// END OF EJS Templates
repair: speed up stripping of many roots...
Martin von Zweigbergk -
r30706:2e486264 default
parent child Browse files
Show More
@@ -1,362 +1,362 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 error,
19 error,
20 exchange,
20 exchange,
21 obsolete,
21 obsolete,
22 util,
22 util,
23 )
23 )
24
24
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Write a backup bundle for the revisions between bases and heads.

    The bundle is stored under .hg/strip-backup/ and its vfs-relative
    path is returned.
    """
    # Pick the newest changegroup version the repository can safely use.
    cgversion = changegroup.safeversion(repo)
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)

    backupvfs = repo.vfs
    backupdir = "strip-backup"
    if not backupvfs.isdir(backupdir):
        backupvfs.mkdir(backupdir)

    # Hash every node going into the bundle so the file name is unique.
    hexnodes = sorted(ctx.hex()
                      for ctx in repo.set('%ln::%ln', bases, heads))
    totalhash = hashlib.sha1(''.join(hexnodes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # Choose on-disk bundle format and (optional) compression.
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, backupvfs,
                               compression=comp)
53
53
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    # Union of the files touched by every changeset at or above striprev.
    affected = set()
    for rev in xrange(striprev, len(repo)):
        affected.update(repo[rev].files())
    return sorted(affected)
62
62
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()

    def collectone(revlog):
        # getstrippoint() reports the revs in this revlog whose linkrevs
        # would be invalidated by stripping at striprev; record the
        # changelog revs they link to.
        _, brokenset = revlog.getstrippoint(striprev)
        broken.update(revlog.linkrev(r) for r in brokenset)

    collectone(repo.manifestlog._revlog)
    for fname in files:
        collectone(repo.file(fname))

    return broken
75
75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """strip the revisions in nodelist (and their descendants) from repo

    If 'backup' is truthy, a backup bundle of the stripped revisions is
    written to .hg/strip-backup/ and its path is returned; otherwise
    None is returned.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    # striprev is the truncation point: every rev >= striprev is examined.
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    # base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    # Single forward sweep: a rev is stripped iff one of its parents is
    # stripped; this replaces per-root descendant walks.
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            # rev survives the strip but sits above striprev: it must be
            # saved and rebundled after truncation
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # collect bookmarks that point into the stripped set; they are moved
    # to newbmtarget after the strip succeeds
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    # stripping inside an already-open transaction is a programming error
    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # truncate every file the transaction touched back to its
            # pre-strip offset; zero-length files are dropped from the store
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            # re-apply the saved (non-stripped) revisions
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        # move bookmarks that pointed at stripped revisions
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
250
250
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # Walk every changeset and record the store paths of every file
        # it touches that actually exists in the store.
        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # tree manifests store per-directory manifest revlogs under meta/
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # persist the rebuilt cache inside a transaction
            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
322
322
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # ancestors of the bookmark, minus anything reachable from other
    # heads or other bookmarks (i.e. only the revisions exclusive to it)
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
334
334
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Keep every marker whose position is not in 'indices'; the count of
    # dropped markers is the difference in length.
    allmarkers = obsstore._all
    kept = [m for i, m in enumerate(allmarkers) if i not in indices]
    deleted = len(allmarkers) - len(kept)

    # Rewrite the obsstore atomically with only the surviving markers.
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now