##// END OF EJS Templates
repair: combine two loops over changelog revisions...
Martin von Zweigbergk -
r30707:987dbe87 default
parent child Browse files
Show More
@@ -1,362 +1,360 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 error,
19 error,
20 exchange,
20 exchange,
21 obsolete,
21 obsolete,
22 util,
22 util,
23 )
23 )
24
24
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Create a backup bundle containing the specified revisions.

    The bundle is written to the repository's 'strip-backup' directory,
    which is created on demand.  The filename embeds the short hash of
    ``node``, a digest over all bundled changesets (for uniqueness), and
    ``suffix``.

    :repo: the (unfiltered) repository
    :bases: base nodes of the set to bundle
    :heads: head nodes of the set to bundle
    :node: node whose short hash names the backup file
    :suffix: filename suffix (e.g. 'backup' or 'temp')
    :compress: when False, write an uncompressed bundle (used for
               temporary bundles that are deleted right after use)

    Returns the backup file path relative to repo.vfs.
    """
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # Pick the bundle type: HG20 when the changegroup version requires
    # bundle2, otherwise the legacy HG10 formats.
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)
53
53
54 def _collectfiles(repo, striprev):
54 def _collectfiles(repo, striprev):
55 """find out the filelogs affected by the strip"""
55 """find out the filelogs affected by the strip"""
56 files = set()
56 files = set()
57
57
58 for x in xrange(striprev, len(repo)):
58 for x in xrange(striprev, len(repo)):
59 files.update(repo[x].files())
59 files.update(repo[x].files())
60
60
61 return sorted(files)
61 return sorted(files)
62
62
63 def _collectbrokencsets(repo, files, striprev):
63 def _collectbrokencsets(repo, files, striprev):
64 """return the changesets which will be broken by the truncation"""
64 """return the changesets which will be broken by the truncation"""
65 s = set()
65 s = set()
66 def collectone(revlog):
66 def collectone(revlog):
67 _, brokenset = revlog.getstrippoint(striprev)
67 _, brokenset = revlog.getstrippoint(striprev)
68 s.update([revlog.linkrev(r) for r in brokenset])
68 s.update([revlog.linkrev(r) for r in brokenset])
69
69
70 collectone(repo.manifestlog._revlog)
70 collectone(repo.manifestlog._revlog)
71 for fname in files:
71 for fname in files:
72 collectone(repo.file(fname))
72 collectone(repo.file(fname))
73
73
74 return s
74 return s
75
75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the changesets in ``nodelist`` (and their descendants).

    Revisions above the strip point that are NOT descendants of the
    stripped set are saved to a temporary bundle and re-applied after the
    revlogs are truncated.  When ``backup`` is true, a backup bundle of
    everything stripped is written to .hg/strip-backup first.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.  It must NOT be called from inside an
    existing transaction.

    Returns the backup file path (or None if 'backup' was False) so
    extensions can use it.
    """
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    # Single pass over the revisions above the strip point: grow the
    # stripped set with descendants, and simultaneously track the revs to
    # save and their heads.
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Bookmarks pointing into the stripped set will be moved to
    # newbmtarget afterwards.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # Truncate the revlog files touched above back to the offsets
            # recorded in the transaction journal.
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            # NOTE(review): if repo.lock() raises, tr/lock are None and
            # these calls would fail; preserved as-is from the original.
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
250
248
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    Walks every changeset, probes the store for the .i/.d revlog files of
    each file ever touched (plus tree manifest revlogs when enabled), and
    replaces the fncache contents with the set that actually exists.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
322
320
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.

    Selects the ancestors of the bookmark that are reachable neither from
    other heads nor from other bookmarks.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
334
332
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Partition the markers: keep those whose index is not listed, count
    # the rest.
    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    # Atomic rewrite: the temp file replaces 'obsstore' on close().
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
General Comments 0
You need to be logged in to leave comments. Login now