repair: remove unnecessary locking for bookmarks
Martin von Zweigbergk
r32925:4c6e4a44 default
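The change below drops a repo.lock() acquisition around the bookmark
transaction in strip(). As the comment at the top of strip() notes, the
function already requires the caller to hold the repository lock, so taking
it again at that point was redundant. In effect:

    # before: re-acquired the lock the caller already holds
    with repo.lock():
        with repo.transaction('repair') as tr:
            bm.recordchange(tr)

    # after: rely on the caller's lock
    with repo.transaction('repair') as tr:
        bm.recordchange(tr)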
@@ -1,372 +1,371 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib

from .i18n import _
from .node import short
from . import (
    bundle2,
    changegroup,
    discovery,
    error,
    exchange,
    obsolete,
    util,
)

def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
    """create a bundle with the specified revisions as a backup"""

    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    cgversion = changegroup.safeversion(repo)
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {'cg.version': cgversion, 'obsolescence': obsolescence}
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)

def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)

def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifestlog._revlog)
    for fname in files:
        collectone(repo.file(fname))

    return s

def strip(ui, repo, nodelist, backup=True, topic='backup'):
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers', True):
        obsmarkers = obsolete.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, as it might re-introduce prune
        # markers we are trying to strip. This is harmless since the stripped
        # markers are already backed up and we did not touch the markers for
        # the saved changesets.
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False, obsolescence=False)

    mfst = repo.manifestlog._revlog

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            striptrees(repo, tr, striprev, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        deleteobsmarkers(repo.obsstore, stripobsidx)
        del repo.obsstore

        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()

-        with repo.lock():
-            with repo.transaction('repair') as tr:
-                bm.recordchange(tr)
+        with repo.transaction('repair') as tr:
+            bm.recordchange(tr)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile

def striptrees(repo, tr, striprev, files):
    if 'treemanifest' in repo.requirements: # safe but unnecessary
                                            # otherwise
        for unencoded, encoded, size in repo.store.datafiles():
            if (unencoded.startswith('meta/') and
                unencoded.endswith('00manifest.i')):
                dir = unencoded[5:-12]
                repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)

def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))

def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)

def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
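
For context, a minimal sketch of how a caller is expected to drive strip(),
based only on the requirements stated in the code above (the caller holds the
repository lock, and no transaction may be active); the function name and
revision variable here are hypothetical:

    from mercurial import repair

    def mystrip(ui, repo, rev):
        node = repo[rev].node()
        # strip() asserts there is no current transaction and opens its own,
        # so only the lock is taken here.
        with repo.lock():
            backupfile = repair.strip(ui, repo, [node], backup=True)
        # strip() returns the backup bundle path (or None if backup was
        # disabled) so extensions can report or reuse it
        return backupfile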