##// END OF EJS Templates
fncache: make debugrebuildfncache not fail on broken fncache...
Valentin Gatien-Baron -
r42960:f59f8a5e stable
parent child Browse files
Show More
@@ -1,482 +1,480 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 short,
17 short,
18 )
18 )
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 exchange,
24 exchange,
25 obsolete,
25 obsolete,
26 obsutil,
26 obsutil,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 stringutil,
32 stringutil,
33 )
33 )
34
34
35 def backupbundle(repo, bases, heads, node, suffix, compress=True,
35 def backupbundle(repo, bases, heads, node, suffix, compress=True,
36 obsolescence=True):
36 obsolescence=True):
37 """create a bundle with the specified revisions as a backup"""
37 """create a bundle with the specified revisions as a backup"""
38
38
39 backupdir = "strip-backup"
39 backupdir = "strip-backup"
40 vfs = repo.vfs
40 vfs = repo.vfs
41 if not vfs.isdir(backupdir):
41 if not vfs.isdir(backupdir):
42 vfs.mkdir(backupdir)
42 vfs.mkdir(backupdir)
43
43
44 # Include a hash of all the nodes in the filename for uniqueness
44 # Include a hash of all the nodes in the filename for uniqueness
45 allcommits = repo.set('%ln::%ln', bases, heads)
45 allcommits = repo.set('%ln::%ln', bases, heads)
46 allhashes = sorted(c.hex() for c in allcommits)
46 allhashes = sorted(c.hex() for c in allcommits)
47 totalhash = hashlib.sha1(''.join(allhashes)).digest()
47 totalhash = hashlib.sha1(''.join(allhashes)).digest()
48 name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
48 name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
49 hex(totalhash[:4]), suffix)
49 hex(totalhash[:4]), suffix)
50
50
51 cgversion = changegroup.localversion(repo)
51 cgversion = changegroup.localversion(repo)
52 comp = None
52 comp = None
53 if cgversion != '01':
53 if cgversion != '01':
54 bundletype = "HG20"
54 bundletype = "HG20"
55 if compress:
55 if compress:
56 comp = 'BZ'
56 comp = 'BZ'
57 elif compress:
57 elif compress:
58 bundletype = "HG10BZ"
58 bundletype = "HG10BZ"
59 else:
59 else:
60 bundletype = "HG10UN"
60 bundletype = "HG10UN"
61
61
62 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
62 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
63 contentopts = {
63 contentopts = {
64 'cg.version': cgversion,
64 'cg.version': cgversion,
65 'obsolescence': obsolescence,
65 'obsolescence': obsolescence,
66 'phases': True,
66 'phases': True,
67 }
67 }
68 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
68 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
69 outgoing, contentopts, vfs, compression=comp)
69 outgoing, contentopts, vfs, compression=comp)
70
70
71 def _collectfiles(repo, striprev):
71 def _collectfiles(repo, striprev):
72 """find out the filelogs affected by the strip"""
72 """find out the filelogs affected by the strip"""
73 files = set()
73 files = set()
74
74
75 for x in pycompat.xrange(striprev, len(repo)):
75 for x in pycompat.xrange(striprev, len(repo)):
76 files.update(repo[x].files())
76 files.update(repo[x].files())
77
77
78 return sorted(files)
78 return sorted(files)
79
79
80 def _collectrevlog(revlog, striprev):
80 def _collectrevlog(revlog, striprev):
81 _, brokenset = revlog.getstrippoint(striprev)
81 _, brokenset = revlog.getstrippoint(striprev)
82 return [revlog.linkrev(r) for r in brokenset]
82 return [revlog.linkrev(r) for r in brokenset]
83
83
84 def _collectmanifest(repo, striprev):
84 def _collectmanifest(repo, striprev):
85 return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)
85 return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)
86
86
87 def _collectbrokencsets(repo, files, striprev):
87 def _collectbrokencsets(repo, files, striprev):
88 """return the changesets which will be broken by the truncation"""
88 """return the changesets which will be broken by the truncation"""
89 s = set()
89 s = set()
90
90
91 s.update(_collectmanifest(repo, striprev))
91 s.update(_collectmanifest(repo, striprev))
92 for fname in files:
92 for fname in files:
93 s.update(_collectrevlog(repo.file(fname), striprev))
93 s.update(_collectrevlog(repo.file(fname), striprev))
94
94
95 return s
95 return s
96
96
97 def strip(ui, repo, nodelist, backup=True, topic='backup'):
97 def strip(ui, repo, nodelist, backup=True, topic='backup'):
98 # This function requires the caller to lock the repo, but it operates
98 # This function requires the caller to lock the repo, but it operates
99 # within a transaction of its own, and thus requires there to be no current
99 # within a transaction of its own, and thus requires there to be no current
100 # transaction when it is called.
100 # transaction when it is called.
101 if repo.currenttransaction() is not None:
101 if repo.currenttransaction() is not None:
102 raise error.ProgrammingError('cannot strip from inside a transaction')
102 raise error.ProgrammingError('cannot strip from inside a transaction')
103
103
104 # Simple way to maintain backwards compatibility for this
104 # Simple way to maintain backwards compatibility for this
105 # argument.
105 # argument.
106 if backup in ['none', 'strip']:
106 if backup in ['none', 'strip']:
107 backup = False
107 backup = False
108
108
109 repo = repo.unfiltered()
109 repo = repo.unfiltered()
110 repo.destroying()
110 repo.destroying()
111 vfs = repo.vfs
111 vfs = repo.vfs
112 # load bookmark before changelog to avoid side effect from outdated
112 # load bookmark before changelog to avoid side effect from outdated
113 # changelog (see repo._refreshchangelog)
113 # changelog (see repo._refreshchangelog)
114 repo._bookmarks
114 repo._bookmarks
115 cl = repo.changelog
115 cl = repo.changelog
116
116
117 # TODO handle undo of merge sets
117 # TODO handle undo of merge sets
118 if isinstance(nodelist, str):
118 if isinstance(nodelist, str):
119 nodelist = [nodelist]
119 nodelist = [nodelist]
120 striplist = [cl.rev(node) for node in nodelist]
120 striplist = [cl.rev(node) for node in nodelist]
121 striprev = min(striplist)
121 striprev = min(striplist)
122
122
123 files = _collectfiles(repo, striprev)
123 files = _collectfiles(repo, striprev)
124 saverevs = _collectbrokencsets(repo, files, striprev)
124 saverevs = _collectbrokencsets(repo, files, striprev)
125
125
126 # Some revisions with rev > striprev may not be descendants of striprev.
126 # Some revisions with rev > striprev may not be descendants of striprev.
127 # We have to find these revisions and put them in a bundle, so that
127 # We have to find these revisions and put them in a bundle, so that
128 # we can restore them after the truncations.
128 # we can restore them after the truncations.
129 # To create the bundle we use repo.changegroupsubset which requires
129 # To create the bundle we use repo.changegroupsubset which requires
130 # the list of heads and bases of the set of interesting revisions.
130 # the list of heads and bases of the set of interesting revisions.
131 # (head = revision in the set that has no descendant in the set;
131 # (head = revision in the set that has no descendant in the set;
132 # base = revision in the set that has no ancestor in the set)
132 # base = revision in the set that has no ancestor in the set)
133 tostrip = set(striplist)
133 tostrip = set(striplist)
134 saveheads = set(saverevs)
134 saveheads = set(saverevs)
135 for r in cl.revs(start=striprev + 1):
135 for r in cl.revs(start=striprev + 1):
136 if any(p in tostrip for p in cl.parentrevs(r)):
136 if any(p in tostrip for p in cl.parentrevs(r)):
137 tostrip.add(r)
137 tostrip.add(r)
138
138
139 if r not in tostrip:
139 if r not in tostrip:
140 saverevs.add(r)
140 saverevs.add(r)
141 saveheads.difference_update(cl.parentrevs(r))
141 saveheads.difference_update(cl.parentrevs(r))
142 saveheads.add(r)
142 saveheads.add(r)
143 saveheads = [cl.node(r) for r in saveheads]
143 saveheads = [cl.node(r) for r in saveheads]
144
144
145 # compute base nodes
145 # compute base nodes
146 if saverevs:
146 if saverevs:
147 descendants = set(cl.descendants(saverevs))
147 descendants = set(cl.descendants(saverevs))
148 saverevs.difference_update(descendants)
148 saverevs.difference_update(descendants)
149 savebases = [cl.node(r) for r in saverevs]
149 savebases = [cl.node(r) for r in saverevs]
150 stripbases = [cl.node(r) for r in tostrip]
150 stripbases = [cl.node(r) for r in tostrip]
151
151
152 stripobsidx = obsmarkers = ()
152 stripobsidx = obsmarkers = ()
153 if repo.ui.configbool('devel', 'strip-obsmarkers'):
153 if repo.ui.configbool('devel', 'strip-obsmarkers'):
154 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
154 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
155 if obsmarkers:
155 if obsmarkers:
156 stripobsidx = [i for i, m in enumerate(repo.obsstore)
156 stripobsidx = [i for i, m in enumerate(repo.obsstore)
157 if m in obsmarkers]
157 if m in obsmarkers]
158
158
159 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
159 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
160
160
161 backupfile = None
161 backupfile = None
162 node = nodelist[-1]
162 node = nodelist[-1]
163 if backup:
163 if backup:
164 backupfile = _createstripbackup(repo, stripbases, node, topic)
164 backupfile = _createstripbackup(repo, stripbases, node, topic)
165 # create a changegroup for all the branches we need to keep
165 # create a changegroup for all the branches we need to keep
166 tmpbundlefile = None
166 tmpbundlefile = None
167 if saveheads:
167 if saveheads:
168 # do not compress temporary bundle if we remove it from disk later
168 # do not compress temporary bundle if we remove it from disk later
169 #
169 #
170 # We do not include obsolescence, it might re-introduce prune markers
170 # We do not include obsolescence, it might re-introduce prune markers
171 # we are trying to strip. This is harmless since the stripped markers
171 # we are trying to strip. This is harmless since the stripped markers
172 # are already backed up and we did not touched the markers for the
172 # are already backed up and we did not touched the markers for the
173 # saved changesets.
173 # saved changesets.
174 tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
174 tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
175 compress=False, obsolescence=False)
175 compress=False, obsolescence=False)
176
176
177 with ui.uninterruptible():
177 with ui.uninterruptible():
178 try:
178 try:
179 with repo.transaction("strip") as tr:
179 with repo.transaction("strip") as tr:
180 # TODO this code violates the interface abstraction of the
180 # TODO this code violates the interface abstraction of the
181 # transaction and makes assumptions that file storage is
181 # transaction and makes assumptions that file storage is
182 # using append-only files. We'll need some kind of storage
182 # using append-only files. We'll need some kind of storage
183 # API to handle stripping for us.
183 # API to handle stripping for us.
184 offset = len(tr._entries)
184 offset = len(tr._entries)
185
185
186 tr.startgroup()
186 tr.startgroup()
187 cl.strip(striprev, tr)
187 cl.strip(striprev, tr)
188 stripmanifest(repo, striprev, tr, files)
188 stripmanifest(repo, striprev, tr, files)
189
189
190 for fn in files:
190 for fn in files:
191 repo.file(fn).strip(striprev, tr)
191 repo.file(fn).strip(striprev, tr)
192 tr.endgroup()
192 tr.endgroup()
193
193
194 for i in pycompat.xrange(offset, len(tr._entries)):
194 for i in pycompat.xrange(offset, len(tr._entries)):
195 file, troffset, ignore = tr._entries[i]
195 file, troffset, ignore = tr._entries[i]
196 with repo.svfs(file, 'a', checkambig=True) as fp:
196 with repo.svfs(file, 'a', checkambig=True) as fp:
197 fp.truncate(troffset)
197 fp.truncate(troffset)
198 if troffset == 0:
198 if troffset == 0:
199 repo.store.markremoved(file)
199 repo.store.markremoved(file)
200
200
201 deleteobsmarkers(repo.obsstore, stripobsidx)
201 deleteobsmarkers(repo.obsstore, stripobsidx)
202 del repo.obsstore
202 del repo.obsstore
203 repo.invalidatevolatilesets()
203 repo.invalidatevolatilesets()
204 repo._phasecache.filterunknown(repo)
204 repo._phasecache.filterunknown(repo)
205
205
206 if tmpbundlefile:
206 if tmpbundlefile:
207 ui.note(_("adding branch\n"))
207 ui.note(_("adding branch\n"))
208 f = vfs.open(tmpbundlefile, "rb")
208 f = vfs.open(tmpbundlefile, "rb")
209 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
209 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
210 if not repo.ui.verbose:
210 if not repo.ui.verbose:
211 # silence internal shuffling chatter
211 # silence internal shuffling chatter
212 repo.ui.pushbuffer()
212 repo.ui.pushbuffer()
213 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
213 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
214 txnname = 'strip'
214 txnname = 'strip'
215 if not isinstance(gen, bundle2.unbundle20):
215 if not isinstance(gen, bundle2.unbundle20):
216 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
216 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
217 with repo.transaction(txnname) as tr:
217 with repo.transaction(txnname) as tr:
218 bundle2.applybundle(repo, gen, tr, source='strip',
218 bundle2.applybundle(repo, gen, tr, source='strip',
219 url=tmpbundleurl)
219 url=tmpbundleurl)
220 if not repo.ui.verbose:
220 if not repo.ui.verbose:
221 repo.ui.popbuffer()
221 repo.ui.popbuffer()
222 f.close()
222 f.close()
223
223
224 with repo.transaction('repair') as tr:
224 with repo.transaction('repair') as tr:
225 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
225 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
226 repo._bookmarks.applychanges(repo, tr, bmchanges)
226 repo._bookmarks.applychanges(repo, tr, bmchanges)
227
227
228 # remove undo files
228 # remove undo files
229 for undovfs, undofile in repo.undofiles():
229 for undovfs, undofile in repo.undofiles():
230 try:
230 try:
231 undovfs.unlink(undofile)
231 undovfs.unlink(undofile)
232 except OSError as e:
232 except OSError as e:
233 if e.errno != errno.ENOENT:
233 if e.errno != errno.ENOENT:
234 ui.warn(_('error removing %s: %s\n') %
234 ui.warn(_('error removing %s: %s\n') %
235 (undovfs.join(undofile),
235 (undovfs.join(undofile),
236 stringutil.forcebytestr(e)))
236 stringutil.forcebytestr(e)))
237
237
238 except: # re-raises
238 except: # re-raises
239 if backupfile:
239 if backupfile:
240 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
240 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
241 % vfs.join(backupfile))
241 % vfs.join(backupfile))
242 if tmpbundlefile:
242 if tmpbundlefile:
243 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
243 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
244 % vfs.join(tmpbundlefile))
244 % vfs.join(tmpbundlefile))
245 ui.warn(_("(fix the problem, then recover the changesets with "
245 ui.warn(_("(fix the problem, then recover the changesets with "
246 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
246 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
247 raise
247 raise
248 else:
248 else:
249 if tmpbundlefile:
249 if tmpbundlefile:
250 # Remove temporary bundle only if there were no exceptions
250 # Remove temporary bundle only if there were no exceptions
251 vfs.unlink(tmpbundlefile)
251 vfs.unlink(tmpbundlefile)
252
252
253 repo.destroyed()
253 repo.destroyed()
254 # return the backup file path (or None if 'backup' was False) so
254 # return the backup file path (or None if 'backup' was False) so
255 # extensions can use it
255 # extensions can use it
256 return backupfile
256 return backupfile
257
257
258 def softstrip(ui, repo, nodelist, backup=True, topic='backup'):
258 def softstrip(ui, repo, nodelist, backup=True, topic='backup'):
259 """perform a "soft" strip using the archived phase"""
259 """perform a "soft" strip using the archived phase"""
260 tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)]
260 tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)]
261 if not tostrip:
261 if not tostrip:
262 return None
262 return None
263
263
264 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
264 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
265 if backup:
265 if backup:
266 node = tostrip[0]
266 node = tostrip[0]
267 backupfile = _createstripbackup(repo, tostrip, node, topic)
267 backupfile = _createstripbackup(repo, tostrip, node, topic)
268
268
269 with repo.transaction('strip') as tr:
269 with repo.transaction('strip') as tr:
270 phases.retractboundary(repo, tr, phases.archived, tostrip)
270 phases.retractboundary(repo, tr, phases.archived, tostrip)
271 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
271 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
272 repo._bookmarks.applychanges(repo, tr, bmchanges)
272 repo._bookmarks.applychanges(repo, tr, bmchanges)
273 return backupfile
273 return backupfile
274
274
275
275
276 def _bookmarkmovements(repo, tostrip):
276 def _bookmarkmovements(repo, tostrip):
277 # compute necessary bookmark movement
277 # compute necessary bookmark movement
278 bm = repo._bookmarks
278 bm = repo._bookmarks
279 updatebm = []
279 updatebm = []
280 for m in bm:
280 for m in bm:
281 rev = repo[bm[m]].rev()
281 rev = repo[bm[m]].rev()
282 if rev in tostrip:
282 if rev in tostrip:
283 updatebm.append(m)
283 updatebm.append(m)
284 newbmtarget = None
284 newbmtarget = None
285 # If we need to move bookmarks, compute bookmark
285 # If we need to move bookmarks, compute bookmark
286 # targets. Otherwise we can skip doing this logic.
286 # targets. Otherwise we can skip doing this logic.
287 if updatebm:
287 if updatebm:
288 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
288 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
289 # but is much faster
289 # but is much faster
290 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
290 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
291 if newbmtarget:
291 if newbmtarget:
292 newbmtarget = repo[newbmtarget.first()].node()
292 newbmtarget = repo[newbmtarget.first()].node()
293 else:
293 else:
294 newbmtarget = '.'
294 newbmtarget = '.'
295 return newbmtarget, updatebm
295 return newbmtarget, updatebm
296
296
297 def _createstripbackup(repo, stripbases, node, topic):
297 def _createstripbackup(repo, stripbases, node, topic):
298 # backup the changeset we are about to strip
298 # backup the changeset we are about to strip
299 vfs = repo.vfs
299 vfs = repo.vfs
300 cl = repo.changelog
300 cl = repo.changelog
301 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
301 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
302 repo.ui.status(_("saved backup bundle to %s\n") %
302 repo.ui.status(_("saved backup bundle to %s\n") %
303 vfs.join(backupfile))
303 vfs.join(backupfile))
304 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
304 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
305 vfs.join(backupfile))
305 vfs.join(backupfile))
306 return backupfile
306 return backupfile
307
307
308 def safestriproots(ui, repo, nodes):
308 def safestriproots(ui, repo, nodes):
309 """return list of roots of nodes where descendants are covered by nodes"""
309 """return list of roots of nodes where descendants are covered by nodes"""
310 torev = repo.unfiltered().changelog.rev
310 torev = repo.unfiltered().changelog.rev
311 revs = set(torev(n) for n in nodes)
311 revs = set(torev(n) for n in nodes)
312 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
312 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
313 # orphaned = affected - wanted
313 # orphaned = affected - wanted
314 # affected = descendants(roots(wanted))
314 # affected = descendants(roots(wanted))
315 # wanted = revs
315 # wanted = revs
316 revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
316 revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
317 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
317 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
318 notstrip = revs - tostrip
318 notstrip = revs - tostrip
319 if notstrip:
319 if notstrip:
320 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
320 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
321 ui.warn(_('warning: orphaned descendants detected, '
321 ui.warn(_('warning: orphaned descendants detected, '
322 'not stripping %s\n') % nodestr)
322 'not stripping %s\n') % nodestr)
323 return [c.node() for c in repo.set('roots(%ld)', tostrip)]
323 return [c.node() for c in repo.set('roots(%ld)', tostrip)]
324
324
325 class stripcallback(object):
325 class stripcallback(object):
326 """used as a transaction postclose callback"""
326 """used as a transaction postclose callback"""
327
327
328 def __init__(self, ui, repo, backup, topic):
328 def __init__(self, ui, repo, backup, topic):
329 self.ui = ui
329 self.ui = ui
330 self.repo = repo
330 self.repo = repo
331 self.backup = backup
331 self.backup = backup
332 self.topic = topic or 'backup'
332 self.topic = topic or 'backup'
333 self.nodelist = []
333 self.nodelist = []
334
334
335 def addnodes(self, nodes):
335 def addnodes(self, nodes):
336 self.nodelist.extend(nodes)
336 self.nodelist.extend(nodes)
337
337
338 def __call__(self, tr):
338 def __call__(self, tr):
339 roots = safestriproots(self.ui, self.repo, self.nodelist)
339 roots = safestriproots(self.ui, self.repo, self.nodelist)
340 if roots:
340 if roots:
341 strip(self.ui, self.repo, roots, self.backup, self.topic)
341 strip(self.ui, self.repo, roots, self.backup, self.topic)
342
342
343 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
343 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
344 """like strip, but works inside transaction and won't strip irreverent revs
344 """like strip, but works inside transaction and won't strip irreverent revs
345
345
346 nodelist must explicitly contain all descendants. Otherwise a warning will
346 nodelist must explicitly contain all descendants. Otherwise a warning will
347 be printed that some nodes are not stripped.
347 be printed that some nodes are not stripped.
348
348
349 Will do a backup if `backup` is True. The last non-None "topic" will be
349 Will do a backup if `backup` is True. The last non-None "topic" will be
350 used as the backup topic name. The default backup topic name is "backup".
350 used as the backup topic name. The default backup topic name is "backup".
351 """
351 """
352 tr = repo.currenttransaction()
352 tr = repo.currenttransaction()
353 if not tr:
353 if not tr:
354 nodes = safestriproots(ui, repo, nodelist)
354 nodes = safestriproots(ui, repo, nodelist)
355 return strip(ui, repo, nodes, backup=backup, topic=topic)
355 return strip(ui, repo, nodes, backup=backup, topic=topic)
356 # transaction postclose callbacks are called in alphabet order.
356 # transaction postclose callbacks are called in alphabet order.
357 # use '\xff' as prefix so we are likely to be called last.
357 # use '\xff' as prefix so we are likely to be called last.
358 callback = tr.getpostclose('\xffstrip')
358 callback = tr.getpostclose('\xffstrip')
359 if callback is None:
359 if callback is None:
360 callback = stripcallback(ui, repo, backup=backup, topic=topic)
360 callback = stripcallback(ui, repo, backup=backup, topic=topic)
361 tr.addpostclose('\xffstrip', callback)
361 tr.addpostclose('\xffstrip', callback)
362 if topic:
362 if topic:
363 callback.topic = topic
363 callback.topic = topic
364 callback.addnodes(nodelist)
364 callback.addnodes(nodelist)
365
365
366 def stripmanifest(repo, striprev, tr, files):
366 def stripmanifest(repo, striprev, tr, files):
367 revlog = repo.manifestlog.getstorage(b'')
367 revlog = repo.manifestlog.getstorage(b'')
368 revlog.strip(striprev, tr)
368 revlog.strip(striprev, tr)
369 striptrees(repo, tr, striprev, files)
369 striptrees(repo, tr, striprev, files)
370
370
371 def striptrees(repo, tr, striprev, files):
371 def striptrees(repo, tr, striprev, files):
372 if 'treemanifest' in repo.requirements:
372 if 'treemanifest' in repo.requirements:
373 # This logic is safe if treemanifest isn't enabled, but also
373 # This logic is safe if treemanifest isn't enabled, but also
374 # pointless, so we skip it if treemanifest isn't enabled.
374 # pointless, so we skip it if treemanifest isn't enabled.
375 for unencoded, encoded, size in repo.store.datafiles():
375 for unencoded, encoded, size in repo.store.datafiles():
376 if (unencoded.startswith('meta/') and
376 if (unencoded.startswith('meta/') and
377 unencoded.endswith('00manifest.i')):
377 unencoded.endswith('00manifest.i')):
378 dir = unencoded[5:-12]
378 dir = unencoded[5:-12]
379 repo.manifestlog.getstorage(dir).strip(striprev, tr)
379 repo.manifestlog.getstorage(dir).strip(striprev, tr)
380
380
381 def rebuildfncache(ui, repo):
381 def rebuildfncache(ui, repo):
382 """Rebuilds the fncache file from repo history.
382 """Rebuilds the fncache file from repo history.
383
383
384 Missing entries will be added. Extra entries will be removed.
384 Missing entries will be added. Extra entries will be removed.
385 """
385 """
386 repo = repo.unfiltered()
386 repo = repo.unfiltered()
387
387
388 if 'fncache' not in repo.requirements:
388 if 'fncache' not in repo.requirements:
389 ui.warn(_('(not rebuilding fncache because repository does not '
389 ui.warn(_('(not rebuilding fncache because repository does not '
390 'support fncache)\n'))
390 'support fncache)\n'))
391 return
391 return
392
392
393 with repo.lock():
393 with repo.lock():
394 fnc = repo.store.fncache
394 fnc = repo.store.fncache
395 # Trigger load of fncache.
395 fnc.ensureloaded(warn=ui.warn)
396 if 'irrelevant' in fnc:
397 pass
398
396
399 oldentries = set(fnc.entries)
397 oldentries = set(fnc.entries)
400 newentries = set()
398 newentries = set()
401 seenfiles = set()
399 seenfiles = set()
402
400
403 progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
401 progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
404 total=len(repo))
402 total=len(repo))
405 for rev in repo:
403 for rev in repo:
406 progress.update(rev)
404 progress.update(rev)
407
405
408 ctx = repo[rev]
406 ctx = repo[rev]
409 for f in ctx.files():
407 for f in ctx.files():
410 # This is to minimize I/O.
408 # This is to minimize I/O.
411 if f in seenfiles:
409 if f in seenfiles:
412 continue
410 continue
413 seenfiles.add(f)
411 seenfiles.add(f)
414
412
415 i = 'data/%s.i' % f
413 i = 'data/%s.i' % f
416 d = 'data/%s.d' % f
414 d = 'data/%s.d' % f
417
415
418 if repo.store._exists(i):
416 if repo.store._exists(i):
419 newentries.add(i)
417 newentries.add(i)
420 if repo.store._exists(d):
418 if repo.store._exists(d):
421 newentries.add(d)
419 newentries.add(d)
422
420
423 progress.complete()
421 progress.complete()
424
422
425 if 'treemanifest' in repo.requirements:
423 if 'treemanifest' in repo.requirements:
426 # This logic is safe if treemanifest isn't enabled, but also
424 # This logic is safe if treemanifest isn't enabled, but also
427 # pointless, so we skip it if treemanifest isn't enabled.
425 # pointless, so we skip it if treemanifest isn't enabled.
428 for dir in util.dirs(seenfiles):
426 for dir in util.dirs(seenfiles):
429 i = 'meta/%s/00manifest.i' % dir
427 i = 'meta/%s/00manifest.i' % dir
430 d = 'meta/%s/00manifest.d' % dir
428 d = 'meta/%s/00manifest.d' % dir
431
429
432 if repo.store._exists(i):
430 if repo.store._exists(i):
433 newentries.add(i)
431 newentries.add(i)
434 if repo.store._exists(d):
432 if repo.store._exists(d):
435 newentries.add(d)
433 newentries.add(d)
436
434
437 addcount = len(newentries - oldentries)
435 addcount = len(newentries - oldentries)
438 removecount = len(oldentries - newentries)
436 removecount = len(oldentries - newentries)
439 for p in sorted(oldentries - newentries):
437 for p in sorted(oldentries - newentries):
440 ui.write(_('removing %s\n') % p)
438 ui.write(_('removing %s\n') % p)
441 for p in sorted(newentries - oldentries):
439 for p in sorted(newentries - oldentries):
442 ui.write(_('adding %s\n') % p)
440 ui.write(_('adding %s\n') % p)
443
441
444 if addcount or removecount:
442 if addcount or removecount:
445 ui.write(_('%d items added, %d removed from fncache\n') %
443 ui.write(_('%d items added, %d removed from fncache\n') %
446 (addcount, removecount))
444 (addcount, removecount))
447 fnc.entries = newentries
445 fnc.entries = newentries
448 fnc._dirty = True
446 fnc._dirty = True
449
447
450 with repo.transaction('fncache') as tr:
448 with repo.transaction('fncache') as tr:
451 fnc.write(tr)
449 fnc.write(tr)
452 else:
450 else:
453 ui.write(_('fncache already up to date\n'))
451 ui.write(_('fncache already up to date\n'))
454
452
455 def deleteobsmarkers(obsstore, indices):
453 def deleteobsmarkers(obsstore, indices):
456 """Delete some obsmarkers from obsstore and return how many were deleted
454 """Delete some obsmarkers from obsstore and return how many were deleted
457
455
458 'indices' is a list of ints which are the indices
456 'indices' is a list of ints which are the indices
459 of the markers to be deleted.
457 of the markers to be deleted.
460
458
461 Every invocation of this function completely rewrites the obsstore file,
459 Every invocation of this function completely rewrites the obsstore file,
462 skipping the markers we want to be removed. The new temporary file is
460 skipping the markers we want to be removed. The new temporary file is
463 created, remaining markers are written there and on .close() this file
461 created, remaining markers are written there and on .close() this file
464 gets atomically renamed to obsstore, thus guaranteeing consistency."""
462 gets atomically renamed to obsstore, thus guaranteeing consistency."""
465 if not indices:
463 if not indices:
466 # we don't want to rewrite the obsstore with the same content
464 # we don't want to rewrite the obsstore with the same content
467 return
465 return
468
466
469 left = []
467 left = []
470 current = obsstore._all
468 current = obsstore._all
471 n = 0
469 n = 0
472 for i, m in enumerate(current):
470 for i, m in enumerate(current):
473 if i in indices:
471 if i in indices:
474 n += 1
472 n += 1
475 continue
473 continue
476 left.append(m)
474 left.append(m)
477
475
478 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
476 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
479 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
477 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
480 newobsstorefile.write(bytes)
478 newobsstorefile.write(bytes)
481 newobsstorefile.close()
479 newobsstorefile.close()
482 return n
480 return n
@@ -1,654 +1,669 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from . import (
17 from . import (
18 error,
18 error,
19 node,
19 node,
20 policy,
20 policy,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 vfs as vfsmod,
23 vfs as vfsmod,
24 )
24 )
25
25
26 parsers = policy.importmod(r'parsers')
26 parsers = policy.importmod(r'parsers')
27 # how much bytes should be read from fncache in one read
27 # how much bytes should be read from fncache in one read
28 # It is done to prevent loading large fncache files into memory
28 # It is done to prevent loading large fncache files into memory
29 fncache_chunksize = 10 ** 6
29 fncache_chunksize = 10 ** 6
30
30
31 def _matchtrackedpath(path, matcher):
31 def _matchtrackedpath(path, matcher):
32 """parses a fncache entry and returns whether the entry is tracking a path
32 """parses a fncache entry and returns whether the entry is tracking a path
33 matched by matcher or not.
33 matched by matcher or not.
34
34
35 If matcher is None, returns True"""
35 If matcher is None, returns True"""
36
36
37 if matcher is None:
37 if matcher is None:
38 return True
38 return True
39 path = decodedir(path)
39 path = decodedir(path)
40 if path.startswith('data/'):
40 if path.startswith('data/'):
41 return matcher(path[len('data/'):-len('.i')])
41 return matcher(path[len('data/'):-len('.i')])
42 elif path.startswith('meta/'):
42 elif path.startswith('meta/'):
43 return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')])
43 return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')])
44
44
45 raise error.ProgrammingError("cannot decode path %s" % path)
45 raise error.ProgrammingError("cannot decode path %s" % path)
46
46
47 # This avoids a collision between a file named foo and a dir named
47 # This avoids a collision between a file named foo and a dir named
48 # foo.i or foo.d
48 # foo.i or foo.d
49 def _encodedir(path):
49 def _encodedir(path):
50 '''
50 '''
51 >>> _encodedir(b'data/foo.i')
51 >>> _encodedir(b'data/foo.i')
52 'data/foo.i'
52 'data/foo.i'
53 >>> _encodedir(b'data/foo.i/bla.i')
53 >>> _encodedir(b'data/foo.i/bla.i')
54 'data/foo.i.hg/bla.i'
54 'data/foo.i.hg/bla.i'
55 >>> _encodedir(b'data/foo.i.hg/bla.i')
55 >>> _encodedir(b'data/foo.i.hg/bla.i')
56 'data/foo.i.hg.hg/bla.i'
56 'data/foo.i.hg.hg/bla.i'
57 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
57 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
58 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
58 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
59 '''
59 '''
60 return (path
60 return (path
61 .replace(".hg/", ".hg.hg/")
61 .replace(".hg/", ".hg.hg/")
62 .replace(".i/", ".i.hg/")
62 .replace(".i/", ".i.hg/")
63 .replace(".d/", ".d.hg/"))
63 .replace(".d/", ".d.hg/"))
64
64
65 encodedir = getattr(parsers, 'encodedir', _encodedir)
65 encodedir = getattr(parsers, 'encodedir', _encodedir)
66
66
67 def decodedir(path):
67 def decodedir(path):
68 '''
68 '''
69 >>> decodedir(b'data/foo.i')
69 >>> decodedir(b'data/foo.i')
70 'data/foo.i'
70 'data/foo.i'
71 >>> decodedir(b'data/foo.i.hg/bla.i')
71 >>> decodedir(b'data/foo.i.hg/bla.i')
72 'data/foo.i/bla.i'
72 'data/foo.i/bla.i'
73 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
73 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
74 'data/foo.i.hg/bla.i'
74 'data/foo.i.hg/bla.i'
75 '''
75 '''
76 if ".hg/" not in path:
76 if ".hg/" not in path:
77 return path
77 return path
78 return (path
78 return (path
79 .replace(".d.hg/", ".d/")
79 .replace(".d.hg/", ".d/")
80 .replace(".i.hg/", ".i/")
80 .replace(".i.hg/", ".i/")
81 .replace(".hg.hg/", ".hg/"))
81 .replace(".hg.hg/", ".hg/"))
82
82
83 def _reserved():
83 def _reserved():
84 ''' characters that are problematic for filesystems
84 ''' characters that are problematic for filesystems
85
85
86 * ascii escapes (0..31)
86 * ascii escapes (0..31)
87 * ascii hi (126..255)
87 * ascii hi (126..255)
88 * windows specials
88 * windows specials
89
89
90 these characters will be escaped by encodefunctions
90 these characters will be escaped by encodefunctions
91 '''
91 '''
92 winreserved = [ord(x) for x in u'\\:*?"<>|']
92 winreserved = [ord(x) for x in u'\\:*?"<>|']
93 for x in range(32):
93 for x in range(32):
94 yield x
94 yield x
95 for x in range(126, 256):
95 for x in range(126, 256):
96 yield x
96 yield x
97 for x in winreserved:
97 for x in winreserved:
98 yield x
98 yield x
99
99
100 def _buildencodefun():
100 def _buildencodefun():
101 '''
101 '''
102 >>> enc, dec = _buildencodefun()
102 >>> enc, dec = _buildencodefun()
103
103
104 >>> enc(b'nothing/special.txt')
104 >>> enc(b'nothing/special.txt')
105 'nothing/special.txt'
105 'nothing/special.txt'
106 >>> dec(b'nothing/special.txt')
106 >>> dec(b'nothing/special.txt')
107 'nothing/special.txt'
107 'nothing/special.txt'
108
108
109 >>> enc(b'HELLO')
109 >>> enc(b'HELLO')
110 '_h_e_l_l_o'
110 '_h_e_l_l_o'
111 >>> dec(b'_h_e_l_l_o')
111 >>> dec(b'_h_e_l_l_o')
112 'HELLO'
112 'HELLO'
113
113
114 >>> enc(b'hello:world?')
114 >>> enc(b'hello:world?')
115 'hello~3aworld~3f'
115 'hello~3aworld~3f'
116 >>> dec(b'hello~3aworld~3f')
116 >>> dec(b'hello~3aworld~3f')
117 'hello:world?'
117 'hello:world?'
118
118
119 >>> enc(b'the\\x07quick\\xADshot')
119 >>> enc(b'the\\x07quick\\xADshot')
120 'the~07quick~adshot'
120 'the~07quick~adshot'
121 >>> dec(b'the~07quick~adshot')
121 >>> dec(b'the~07quick~adshot')
122 'the\\x07quick\\xadshot'
122 'the\\x07quick\\xadshot'
123 '''
123 '''
124 e = '_'
124 e = '_'
125 xchr = pycompat.bytechr
125 xchr = pycompat.bytechr
126 asciistr = list(map(xchr, range(127)))
126 asciistr = list(map(xchr, range(127)))
127 capitals = list(range(ord("A"), ord("Z") + 1))
127 capitals = list(range(ord("A"), ord("Z") + 1))
128
128
129 cmap = dict((x, x) for x in asciistr)
129 cmap = dict((x, x) for x in asciistr)
130 for x in _reserved():
130 for x in _reserved():
131 cmap[xchr(x)] = "~%02x" % x
131 cmap[xchr(x)] = "~%02x" % x
132 for x in capitals + [ord(e)]:
132 for x in capitals + [ord(e)]:
133 cmap[xchr(x)] = e + xchr(x).lower()
133 cmap[xchr(x)] = e + xchr(x).lower()
134
134
135 dmap = {}
135 dmap = {}
136 for k, v in cmap.iteritems():
136 for k, v in cmap.iteritems():
137 dmap[v] = k
137 dmap[v] = k
138 def decode(s):
138 def decode(s):
139 i = 0
139 i = 0
140 while i < len(s):
140 while i < len(s):
141 for l in pycompat.xrange(1, 4):
141 for l in pycompat.xrange(1, 4):
142 try:
142 try:
143 yield dmap[s[i:i + l]]
143 yield dmap[s[i:i + l]]
144 i += l
144 i += l
145 break
145 break
146 except KeyError:
146 except KeyError:
147 pass
147 pass
148 else:
148 else:
149 raise KeyError
149 raise KeyError
150 return (lambda s: ''.join([cmap[s[c:c + 1]]
150 return (lambda s: ''.join([cmap[s[c:c + 1]]
151 for c in pycompat.xrange(len(s))]),
151 for c in pycompat.xrange(len(s))]),
152 lambda s: ''.join(list(decode(s))))
152 lambda s: ''.join(list(decode(s))))
153
153
154 _encodefname, _decodefname = _buildencodefun()
154 _encodefname, _decodefname = _buildencodefun()
155
155
156 def encodefilename(s):
156 def encodefilename(s):
157 '''
157 '''
158 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
158 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
159 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
159 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
160 '''
160 '''
161 return _encodefname(encodedir(s))
161 return _encodefname(encodedir(s))
162
162
163 def decodefilename(s):
163 def decodefilename(s):
164 '''
164 '''
165 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
165 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
166 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
166 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
167 '''
167 '''
168 return decodedir(_decodefname(s))
168 return decodedir(_decodefname(s))
169
169
170 def _buildlowerencodefun():
170 def _buildlowerencodefun():
171 '''
171 '''
172 >>> f = _buildlowerencodefun()
172 >>> f = _buildlowerencodefun()
173 >>> f(b'nothing/special.txt')
173 >>> f(b'nothing/special.txt')
174 'nothing/special.txt'
174 'nothing/special.txt'
175 >>> f(b'HELLO')
175 >>> f(b'HELLO')
176 'hello'
176 'hello'
177 >>> f(b'hello:world?')
177 >>> f(b'hello:world?')
178 'hello~3aworld~3f'
178 'hello~3aworld~3f'
179 >>> f(b'the\\x07quick\\xADshot')
179 >>> f(b'the\\x07quick\\xADshot')
180 'the~07quick~adshot'
180 'the~07quick~adshot'
181 '''
181 '''
182 xchr = pycompat.bytechr
182 xchr = pycompat.bytechr
183 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
183 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
184 for x in _reserved():
184 for x in _reserved():
185 cmap[xchr(x)] = "~%02x" % x
185 cmap[xchr(x)] = "~%02x" % x
186 for x in range(ord("A"), ord("Z") + 1):
186 for x in range(ord("A"), ord("Z") + 1):
187 cmap[xchr(x)] = xchr(x).lower()
187 cmap[xchr(x)] = xchr(x).lower()
188 def lowerencode(s):
188 def lowerencode(s):
189 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
189 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
190 return lowerencode
190 return lowerencode
191
191
192 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
192 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
193
193
194 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
194 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
195 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
195 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
196 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
196 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
197 def _auxencode(path, dotencode):
197 def _auxencode(path, dotencode):
198 '''
198 '''
199 Encodes filenames containing names reserved by Windows or which end in
199 Encodes filenames containing names reserved by Windows or which end in
200 period or space. Does not touch other single reserved characters c.
200 period or space. Does not touch other single reserved characters c.
201 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
201 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
202 Additionally encodes space or period at the beginning, if dotencode is
202 Additionally encodes space or period at the beginning, if dotencode is
203 True. Parameter path is assumed to be all lowercase.
203 True. Parameter path is assumed to be all lowercase.
204 A segment only needs encoding if a reserved name appears as a
204 A segment only needs encoding if a reserved name appears as a
205 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
205 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
206 doesn't need encoding.
206 doesn't need encoding.
207
207
208 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
208 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
209 >>> _auxencode(s.split(b'/'), True)
209 >>> _auxencode(s.split(b'/'), True)
210 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
210 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
211 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
211 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
212 >>> _auxencode(s.split(b'/'), False)
212 >>> _auxencode(s.split(b'/'), False)
213 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
213 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
214 >>> _auxencode([b'foo. '], True)
214 >>> _auxencode([b'foo. '], True)
215 ['foo.~20']
215 ['foo.~20']
216 >>> _auxencode([b' .foo'], True)
216 >>> _auxencode([b' .foo'], True)
217 ['~20.foo']
217 ['~20.foo']
218 '''
218 '''
219 for i, n in enumerate(path):
219 for i, n in enumerate(path):
220 if not n:
220 if not n:
221 continue
221 continue
222 if dotencode and n[0] in '. ':
222 if dotencode and n[0] in '. ':
223 n = "~%02x" % ord(n[0:1]) + n[1:]
223 n = "~%02x" % ord(n[0:1]) + n[1:]
224 path[i] = n
224 path[i] = n
225 else:
225 else:
226 l = n.find('.')
226 l = n.find('.')
227 if l == -1:
227 if l == -1:
228 l = len(n)
228 l = len(n)
229 if ((l == 3 and n[:3] in _winres3) or
229 if ((l == 3 and n[:3] in _winres3) or
230 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
230 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
231 and n[:3] in _winres4)):
231 and n[:3] in _winres4)):
232 # encode third letter ('aux' -> 'au~78')
232 # encode third letter ('aux' -> 'au~78')
233 ec = "~%02x" % ord(n[2:3])
233 ec = "~%02x" % ord(n[2:3])
234 n = n[0:2] + ec + n[3:]
234 n = n[0:2] + ec + n[3:]
235 path[i] = n
235 path[i] = n
236 if n[-1] in '. ':
236 if n[-1] in '. ':
237 # encode last period or space ('foo...' -> 'foo..~2e')
237 # encode last period or space ('foo...' -> 'foo..~2e')
238 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
238 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
239 return path
239 return path
240
240
241 _maxstorepathlen = 120
241 _maxstorepathlen = 120
242 _dirprefixlen = 8
242 _dirprefixlen = 8
243 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
243 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
244
244
245 def _hashencode(path, dotencode):
245 def _hashencode(path, dotencode):
246 digest = node.hex(hashlib.sha1(path).digest())
246 digest = node.hex(hashlib.sha1(path).digest())
247 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
247 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
248 parts = _auxencode(le, dotencode)
248 parts = _auxencode(le, dotencode)
249 basename = parts[-1]
249 basename = parts[-1]
250 _root, ext = os.path.splitext(basename)
250 _root, ext = os.path.splitext(basename)
251 sdirs = []
251 sdirs = []
252 sdirslen = 0
252 sdirslen = 0
253 for p in parts[:-1]:
253 for p in parts[:-1]:
254 d = p[:_dirprefixlen]
254 d = p[:_dirprefixlen]
255 if d[-1] in '. ':
255 if d[-1] in '. ':
256 # Windows can't access dirs ending in period or space
256 # Windows can't access dirs ending in period or space
257 d = d[:-1] + '_'
257 d = d[:-1] + '_'
258 if sdirslen == 0:
258 if sdirslen == 0:
259 t = len(d)
259 t = len(d)
260 else:
260 else:
261 t = sdirslen + 1 + len(d)
261 t = sdirslen + 1 + len(d)
262 if t > _maxshortdirslen:
262 if t > _maxshortdirslen:
263 break
263 break
264 sdirs.append(d)
264 sdirs.append(d)
265 sdirslen = t
265 sdirslen = t
266 dirs = '/'.join(sdirs)
266 dirs = '/'.join(sdirs)
267 if len(dirs) > 0:
267 if len(dirs) > 0:
268 dirs += '/'
268 dirs += '/'
269 res = 'dh/' + dirs + digest + ext
269 res = 'dh/' + dirs + digest + ext
270 spaceleft = _maxstorepathlen - len(res)
270 spaceleft = _maxstorepathlen - len(res)
271 if spaceleft > 0:
271 if spaceleft > 0:
272 filler = basename[:spaceleft]
272 filler = basename[:spaceleft]
273 res = 'dh/' + dirs + filler + digest + ext
273 res = 'dh/' + dirs + filler + digest + ext
274 return res
274 return res
275
275
276 def _hybridencode(path, dotencode):
276 def _hybridencode(path, dotencode):
277 '''encodes path with a length limit
277 '''encodes path with a length limit
278
278
279 Encodes all paths that begin with 'data/', according to the following.
279 Encodes all paths that begin with 'data/', according to the following.
280
280
281 Default encoding (reversible):
281 Default encoding (reversible):
282
282
283 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
283 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
284 characters are encoded as '~xx', where xx is the two digit hex code
284 characters are encoded as '~xx', where xx is the two digit hex code
285 of the character (see encodefilename).
285 of the character (see encodefilename).
286 Relevant path components consisting of Windows reserved filenames are
286 Relevant path components consisting of Windows reserved filenames are
287 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
287 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
288
288
289 Hashed encoding (not reversible):
289 Hashed encoding (not reversible):
290
290
291 If the default-encoded path is longer than _maxstorepathlen, a
291 If the default-encoded path is longer than _maxstorepathlen, a
292 non-reversible hybrid hashing of the path is done instead.
292 non-reversible hybrid hashing of the path is done instead.
293 This encoding uses up to _dirprefixlen characters of all directory
293 This encoding uses up to _dirprefixlen characters of all directory
294 levels of the lowerencoded path, but not more levels than can fit into
294 levels of the lowerencoded path, but not more levels than can fit into
295 _maxshortdirslen.
295 _maxshortdirslen.
296 Then follows the filler followed by the sha digest of the full path.
296 Then follows the filler followed by the sha digest of the full path.
297 The filler is the beginning of the basename of the lowerencoded path
297 The filler is the beginning of the basename of the lowerencoded path
298 (the basename is everything after the last path separator). The filler
298 (the basename is everything after the last path separator). The filler
299 is as long as possible, filling in characters from the basename until
299 is as long as possible, filling in characters from the basename until
300 the encoded path has _maxstorepathlen characters (or all chars of the
300 the encoded path has _maxstorepathlen characters (or all chars of the
301 basename have been taken).
301 basename have been taken).
302 The extension (e.g. '.i' or '.d') is preserved.
302 The extension (e.g. '.i' or '.d') is preserved.
303
303
304 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
304 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
305 encoding was used.
305 encoding was used.
306 '''
306 '''
307 path = encodedir(path)
307 path = encodedir(path)
308 ef = _encodefname(path).split('/')
308 ef = _encodefname(path).split('/')
309 res = '/'.join(_auxencode(ef, dotencode))
309 res = '/'.join(_auxencode(ef, dotencode))
310 if len(res) > _maxstorepathlen:
310 if len(res) > _maxstorepathlen:
311 res = _hashencode(path, dotencode)
311 res = _hashencode(path, dotencode)
312 return res
312 return res
313
313
314 def _pathencode(path):
314 def _pathencode(path):
315 de = encodedir(path)
315 de = encodedir(path)
316 if len(path) > _maxstorepathlen:
316 if len(path) > _maxstorepathlen:
317 return _hashencode(de, True)
317 return _hashencode(de, True)
318 ef = _encodefname(de).split('/')
318 ef = _encodefname(de).split('/')
319 res = '/'.join(_auxencode(ef, True))
319 res = '/'.join(_auxencode(ef, True))
320 if len(res) > _maxstorepathlen:
320 if len(res) > _maxstorepathlen:
321 return _hashencode(de, True)
321 return _hashencode(de, True)
322 return res
322 return res
323
323
324 _pathencode = getattr(parsers, 'pathencode', _pathencode)
324 _pathencode = getattr(parsers, 'pathencode', _pathencode)
325
325
326 def _plainhybridencode(f):
326 def _plainhybridencode(f):
327 return _hybridencode(f, False)
327 return _hybridencode(f, False)
328
328
329 def _calcmode(vfs):
329 def _calcmode(vfs):
330 try:
330 try:
331 # files in .hg/ will be created using this mode
331 # files in .hg/ will be created using this mode
332 mode = vfs.stat().st_mode
332 mode = vfs.stat().st_mode
333 # avoid some useless chmods
333 # avoid some useless chmods
334 if (0o777 & ~util.umask) == (0o777 & mode):
334 if (0o777 & ~util.umask) == (0o777 & mode):
335 mode = None
335 mode = None
336 except OSError:
336 except OSError:
337 mode = None
337 mode = None
338 return mode
338 return mode
339
339
340 _data = ('bookmarks narrowspec data meta 00manifest.d 00manifest.i'
340 _data = ('bookmarks narrowspec data meta 00manifest.d 00manifest.i'
341 ' 00changelog.d 00changelog.i phaseroots obsstore')
341 ' 00changelog.d 00changelog.i phaseroots obsstore')
342
342
343 def isrevlog(f, kind, st):
343 def isrevlog(f, kind, st):
344 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
344 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
345
345
346 class basicstore(object):
346 class basicstore(object):
347 '''base class for local repository stores'''
347 '''base class for local repository stores'''
348 def __init__(self, path, vfstype):
348 def __init__(self, path, vfstype):
349 vfs = vfstype(path)
349 vfs = vfstype(path)
350 self.path = vfs.base
350 self.path = vfs.base
351 self.createmode = _calcmode(vfs)
351 self.createmode = _calcmode(vfs)
352 vfs.createmode = self.createmode
352 vfs.createmode = self.createmode
353 self.rawvfs = vfs
353 self.rawvfs = vfs
354 self.vfs = vfsmod.filtervfs(vfs, encodedir)
354 self.vfs = vfsmod.filtervfs(vfs, encodedir)
355 self.opener = self.vfs
355 self.opener = self.vfs
356
356
357 def join(self, f):
357 def join(self, f):
358 return self.path + '/' + encodedir(f)
358 return self.path + '/' + encodedir(f)
359
359
360 def _walk(self, relpath, recurse, filefilter=isrevlog):
360 def _walk(self, relpath, recurse, filefilter=isrevlog):
361 '''yields (unencoded, encoded, size)'''
361 '''yields (unencoded, encoded, size)'''
362 path = self.path
362 path = self.path
363 if relpath:
363 if relpath:
364 path += '/' + relpath
364 path += '/' + relpath
365 striplen = len(self.path) + 1
365 striplen = len(self.path) + 1
366 l = []
366 l = []
367 if self.rawvfs.isdir(path):
367 if self.rawvfs.isdir(path):
368 visit = [path]
368 visit = [path]
369 readdir = self.rawvfs.readdir
369 readdir = self.rawvfs.readdir
370 while visit:
370 while visit:
371 p = visit.pop()
371 p = visit.pop()
372 for f, kind, st in readdir(p, stat=True):
372 for f, kind, st in readdir(p, stat=True):
373 fp = p + '/' + f
373 fp = p + '/' + f
374 if filefilter(f, kind, st):
374 if filefilter(f, kind, st):
375 n = util.pconvert(fp[striplen:])
375 n = util.pconvert(fp[striplen:])
376 l.append((decodedir(n), n, st.st_size))
376 l.append((decodedir(n), n, st.st_size))
377 elif kind == stat.S_IFDIR and recurse:
377 elif kind == stat.S_IFDIR and recurse:
378 visit.append(fp)
378 visit.append(fp)
379 l.sort()
379 l.sort()
380 return l
380 return l
381
381
382 def datafiles(self, matcher=None):
382 def datafiles(self, matcher=None):
383 return self._walk('data', True) + self._walk('meta', True)
383 return self._walk('data', True) + self._walk('meta', True)
384
384
385 def topfiles(self):
385 def topfiles(self):
386 # yield manifest before changelog
386 # yield manifest before changelog
387 return reversed(self._walk('', False))
387 return reversed(self._walk('', False))
388
388
389 def walk(self, matcher=None):
389 def walk(self, matcher=None):
390 '''yields (unencoded, encoded, size)
390 '''yields (unencoded, encoded, size)
391
391
392 if a matcher is passed, storage files of only those tracked paths
392 if a matcher is passed, storage files of only those tracked paths
393 are passed with matches the matcher
393 are passed with matches the matcher
394 '''
394 '''
395 # yield data files first
395 # yield data files first
396 for x in self.datafiles(matcher):
396 for x in self.datafiles(matcher):
397 yield x
397 yield x
398 for x in self.topfiles():
398 for x in self.topfiles():
399 yield x
399 yield x
400
400
401 def copylist(self):
401 def copylist(self):
402 return ['requires'] + _data.split()
402 return ['requires'] + _data.split()
403
403
404 def write(self, tr):
404 def write(self, tr):
405 pass
405 pass
406
406
407 def invalidatecaches(self):
407 def invalidatecaches(self):
408 pass
408 pass
409
409
410 def markremoved(self, fn):
410 def markremoved(self, fn):
411 pass
411 pass
412
412
413 def __contains__(self, path):
413 def __contains__(self, path):
414 '''Checks if the store contains path'''
414 '''Checks if the store contains path'''
415 path = "/".join(("data", path))
415 path = "/".join(("data", path))
416 # file?
416 # file?
417 if self.vfs.exists(path + ".i"):
417 if self.vfs.exists(path + ".i"):
418 return True
418 return True
419 # dir?
419 # dir?
420 if not path.endswith("/"):
420 if not path.endswith("/"):
421 path = path + "/"
421 path = path + "/"
422 return self.vfs.exists(path)
422 return self.vfs.exists(path)
423
423
424 class encodedstore(basicstore):
424 class encodedstore(basicstore):
425 def __init__(self, path, vfstype):
425 def __init__(self, path, vfstype):
426 vfs = vfstype(path + '/store')
426 vfs = vfstype(path + '/store')
427 self.path = vfs.base
427 self.path = vfs.base
428 self.createmode = _calcmode(vfs)
428 self.createmode = _calcmode(vfs)
429 vfs.createmode = self.createmode
429 vfs.createmode = self.createmode
430 self.rawvfs = vfs
430 self.rawvfs = vfs
431 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
431 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
432 self.opener = self.vfs
432 self.opener = self.vfs
433
433
434 def datafiles(self, matcher=None):
434 def datafiles(self, matcher=None):
435 for a, b, size in super(encodedstore, self).datafiles():
435 for a, b, size in super(encodedstore, self).datafiles():
436 try:
436 try:
437 a = decodefilename(a)
437 a = decodefilename(a)
438 except KeyError:
438 except KeyError:
439 a = None
439 a = None
440 if a is not None and not _matchtrackedpath(a, matcher):
440 if a is not None and not _matchtrackedpath(a, matcher):
441 continue
441 continue
442 yield a, b, size
442 yield a, b, size
443
443
444 def join(self, f):
444 def join(self, f):
445 return self.path + '/' + encodefilename(f)
445 return self.path + '/' + encodefilename(f)
446
446
447 def copylist(self):
447 def copylist(self):
448 return (['requires', '00changelog.i'] +
448 return (['requires', '00changelog.i'] +
449 ['store/' + f for f in _data.split()])
449 ['store/' + f for f in _data.split()])
450
450
451 class fncache(object):
451 class fncache(object):
452 # the filename used to be partially encoded
452 # the filename used to be partially encoded
453 # hence the encodedir/decodedir dance
453 # hence the encodedir/decodedir dance
454 def __init__(self, vfs):
454 def __init__(self, vfs):
455 self.vfs = vfs
455 self.vfs = vfs
456 self.entries = None
456 self.entries = None
457 self._dirty = False
457 self._dirty = False
458 # set of new additions to fncache
458 # set of new additions to fncache
459 self.addls = set()
459 self.addls = set()
460
460
461 def _load(self):
461 def ensureloaded(self, warn=None):
462 '''read the fncache file if not already read.
463
464 If the file on disk is corrupted, raise. If warn is provided,
465 warn and keep going instead.'''
466 if self.entries is None:
467 self._load(warn)
468
469 def _load(self, warn=None):
462 '''fill the entries from the fncache file'''
470 '''fill the entries from the fncache file'''
463 self._dirty = False
471 self._dirty = False
464 try:
472 try:
465 fp = self.vfs('fncache', mode='rb')
473 fp = self.vfs('fncache', mode='rb')
466 except IOError:
474 except IOError:
467 # skip nonexistent file
475 # skip nonexistent file
468 self.entries = set()
476 self.entries = set()
469 return
477 return
470
478
471 self.entries = set()
479 self.entries = set()
472 chunk = b''
480 chunk = b''
473 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
481 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
474 chunk += c
482 chunk += c
475 try:
483 try:
476 p = chunk.rindex(b'\n')
484 p = chunk.rindex(b'\n')
477 self.entries.update(decodedir(chunk[:p + 1]).splitlines())
485 self.entries.update(decodedir(chunk[:p + 1]).splitlines())
478 chunk = chunk[p + 1:]
486 chunk = chunk[p + 1:]
479 except ValueError:
487 except ValueError:
480 # substring '\n' not found, maybe the entry is bigger than the
488 # substring '\n' not found, maybe the entry is bigger than the
481 # chunksize, so let's keep iterating
489 # chunksize, so let's keep iterating
482 pass
490 pass
483
491
484 if chunk:
492 if chunk:
485 raise error.Abort(_("fncache does not ends with a newline"),
493 msg = _("fncache does not ends with a newline")
486 hint=_("use 'hg debugrebuildfncache' to rebuild"
494 if warn:
487 " the fncache"))
495 warn(msg + '\n')
488 self._checkentries(fp)
496 else:
497 raise error.Abort(msg,
498 hint=_("use 'hg debugrebuildfncache' to "
499 "rebuild the fncache"))
500 self._checkentries(fp, warn)
489 fp.close()
501 fp.close()
490
502
491 def _checkentries(self, fp):
503 def _checkentries(self, fp, warn):
492 """ make sure there is no empty string in entries """
504 """ make sure there is no empty string in entries """
493 if '' in self.entries:
505 if '' in self.entries:
494 fp.seek(0)
506 fp.seek(0)
495 for n, line in enumerate(util.iterfile(fp)):
507 for n, line in enumerate(util.iterfile(fp)):
496 if not line.rstrip('\n'):
508 if not line.rstrip('\n'):
497 t = _('invalid entry in fncache, line %d') % (n + 1)
509 t = _('invalid entry in fncache, line %d') % (n + 1)
498 raise error.Abort(t)
510 if warn:
511 warn(t + '\n')
512 else:
513 raise error.Abort(t)
499
514
500 def write(self, tr):
515 def write(self, tr):
501 if self._dirty:
516 if self._dirty:
502 assert self.entries is not None
517 assert self.entries is not None
503 self.entries = self.entries | self.addls
518 self.entries = self.entries | self.addls
504 self.addls = set()
519 self.addls = set()
505 tr.addbackup('fncache')
520 tr.addbackup('fncache')
506 fp = self.vfs('fncache', mode='wb', atomictemp=True)
521 fp = self.vfs('fncache', mode='wb', atomictemp=True)
507 if self.entries:
522 if self.entries:
508 fp.write(encodedir('\n'.join(self.entries) + '\n'))
523 fp.write(encodedir('\n'.join(self.entries) + '\n'))
509 fp.close()
524 fp.close()
510 self._dirty = False
525 self._dirty = False
511 if self.addls:
526 if self.addls:
512 # if we have just new entries, let's append them to the fncache
527 # if we have just new entries, let's append them to the fncache
513 tr.addbackup('fncache')
528 tr.addbackup('fncache')
514 fp = self.vfs('fncache', mode='ab', atomictemp=True)
529 fp = self.vfs('fncache', mode='ab', atomictemp=True)
515 if self.addls:
530 if self.addls:
516 fp.write(encodedir('\n'.join(self.addls) + '\n'))
531 fp.write(encodedir('\n'.join(self.addls) + '\n'))
517 fp.close()
532 fp.close()
518 self.entries = None
533 self.entries = None
519 self.addls = set()
534 self.addls = set()
520
535
521 def add(self, fn):
536 def add(self, fn):
522 if self.entries is None:
537 if self.entries is None:
523 self._load()
538 self._load()
524 if fn not in self.entries:
539 if fn not in self.entries:
525 self.addls.add(fn)
540 self.addls.add(fn)
526
541
527 def remove(self, fn):
542 def remove(self, fn):
528 if self.entries is None:
543 if self.entries is None:
529 self._load()
544 self._load()
530 if fn in self.addls:
545 if fn in self.addls:
531 self.addls.remove(fn)
546 self.addls.remove(fn)
532 return
547 return
533 try:
548 try:
534 self.entries.remove(fn)
549 self.entries.remove(fn)
535 self._dirty = True
550 self._dirty = True
536 except KeyError:
551 except KeyError:
537 pass
552 pass
538
553
539 def __contains__(self, fn):
554 def __contains__(self, fn):
540 if fn in self.addls:
555 if fn in self.addls:
541 return True
556 return True
542 if self.entries is None:
557 if self.entries is None:
543 self._load()
558 self._load()
544 return fn in self.entries
559 return fn in self.entries
545
560
546 def __iter__(self):
561 def __iter__(self):
547 if self.entries is None:
562 if self.entries is None:
548 self._load()
563 self._load()
549 return iter(self.entries | self.addls)
564 return iter(self.entries | self.addls)
550
565
551 class _fncachevfs(vfsmod.proxyvfs):
566 class _fncachevfs(vfsmod.proxyvfs):
552 def __init__(self, vfs, fnc, encode):
567 def __init__(self, vfs, fnc, encode):
553 vfsmod.proxyvfs.__init__(self, vfs)
568 vfsmod.proxyvfs.__init__(self, vfs)
554 self.fncache = fnc
569 self.fncache = fnc
555 self.encode = encode
570 self.encode = encode
556
571
557 def __call__(self, path, mode='r', *args, **kw):
572 def __call__(self, path, mode='r', *args, **kw):
558 encoded = self.encode(path)
573 encoded = self.encode(path)
559 if mode not in ('r', 'rb') and (path.startswith('data/') or
574 if mode not in ('r', 'rb') and (path.startswith('data/') or
560 path.startswith('meta/')):
575 path.startswith('meta/')):
561 # do not trigger a fncache load when adding a file that already is
576 # do not trigger a fncache load when adding a file that already is
562 # known to exist.
577 # known to exist.
563 notload = self.fncache.entries is None and self.vfs.exists(encoded)
578 notload = self.fncache.entries is None and self.vfs.exists(encoded)
564 if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
579 if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
565 # when appending to an existing file, if the file has size zero,
580 # when appending to an existing file, if the file has size zero,
566 # it should be considered as missing. Such zero-size files are
581 # it should be considered as missing. Such zero-size files are
567 # the result of truncation when a transaction is aborted.
582 # the result of truncation when a transaction is aborted.
568 notload = False
583 notload = False
569 if not notload:
584 if not notload:
570 self.fncache.add(path)
585 self.fncache.add(path)
571 return self.vfs(encoded, mode, *args, **kw)
586 return self.vfs(encoded, mode, *args, **kw)
572
587
573 def join(self, path):
588 def join(self, path):
574 if path:
589 if path:
575 return self.vfs.join(self.encode(path))
590 return self.vfs.join(self.encode(path))
576 else:
591 else:
577 return self.vfs.join(path)
592 return self.vfs.join(path)
578
593
579 class fncachestore(basicstore):
594 class fncachestore(basicstore):
580 def __init__(self, path, vfstype, dotencode):
595 def __init__(self, path, vfstype, dotencode):
581 if dotencode:
596 if dotencode:
582 encode = _pathencode
597 encode = _pathencode
583 else:
598 else:
584 encode = _plainhybridencode
599 encode = _plainhybridencode
585 self.encode = encode
600 self.encode = encode
586 vfs = vfstype(path + '/store')
601 vfs = vfstype(path + '/store')
587 self.path = vfs.base
602 self.path = vfs.base
588 self.pathsep = self.path + '/'
603 self.pathsep = self.path + '/'
589 self.createmode = _calcmode(vfs)
604 self.createmode = _calcmode(vfs)
590 vfs.createmode = self.createmode
605 vfs.createmode = self.createmode
591 self.rawvfs = vfs
606 self.rawvfs = vfs
592 fnc = fncache(vfs)
607 fnc = fncache(vfs)
593 self.fncache = fnc
608 self.fncache = fnc
594 self.vfs = _fncachevfs(vfs, fnc, encode)
609 self.vfs = _fncachevfs(vfs, fnc, encode)
595 self.opener = self.vfs
610 self.opener = self.vfs
596
611
597 def join(self, f):
612 def join(self, f):
598 return self.pathsep + self.encode(f)
613 return self.pathsep + self.encode(f)
599
614
600 def getsize(self, path):
615 def getsize(self, path):
601 return self.rawvfs.stat(path).st_size
616 return self.rawvfs.stat(path).st_size
602
617
603 def datafiles(self, matcher=None):
618 def datafiles(self, matcher=None):
604 for f in sorted(self.fncache):
619 for f in sorted(self.fncache):
605 if not _matchtrackedpath(f, matcher):
620 if not _matchtrackedpath(f, matcher):
606 continue
621 continue
607 ef = self.encode(f)
622 ef = self.encode(f)
608 try:
623 try:
609 yield f, ef, self.getsize(ef)
624 yield f, ef, self.getsize(ef)
610 except OSError as err:
625 except OSError as err:
611 if err.errno != errno.ENOENT:
626 if err.errno != errno.ENOENT:
612 raise
627 raise
613
628
614 def copylist(self):
629 def copylist(self):
615 d = ('bookmarks narrowspec data meta dh fncache phaseroots obsstore'
630 d = ('bookmarks narrowspec data meta dh fncache phaseroots obsstore'
616 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
631 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
617 return (['requires', '00changelog.i'] +
632 return (['requires', '00changelog.i'] +
618 ['store/' + f for f in d.split()])
633 ['store/' + f for f in d.split()])
619
634
620 def write(self, tr):
635 def write(self, tr):
621 self.fncache.write(tr)
636 self.fncache.write(tr)
622
637
623 def invalidatecaches(self):
638 def invalidatecaches(self):
624 self.fncache.entries = None
639 self.fncache.entries = None
625 self.fncache.addls = set()
640 self.fncache.addls = set()
626
641
627 def markremoved(self, fn):
642 def markremoved(self, fn):
628 self.fncache.remove(fn)
643 self.fncache.remove(fn)
629
644
630 def _exists(self, f):
645 def _exists(self, f):
631 ef = self.encode(f)
646 ef = self.encode(f)
632 try:
647 try:
633 self.getsize(ef)
648 self.getsize(ef)
634 return True
649 return True
635 except OSError as err:
650 except OSError as err:
636 if err.errno != errno.ENOENT:
651 if err.errno != errno.ENOENT:
637 raise
652 raise
638 # nonexistent entry
653 # nonexistent entry
639 return False
654 return False
640
655
641 def __contains__(self, path):
656 def __contains__(self, path):
642 '''Checks if the store contains path'''
657 '''Checks if the store contains path'''
643 path = "/".join(("data", path))
658 path = "/".join(("data", path))
644 # check for files (exact match)
659 # check for files (exact match)
645 e = path + '.i'
660 e = path + '.i'
646 if e in self.fncache and self._exists(e):
661 if e in self.fncache and self._exists(e):
647 return True
662 return True
648 # now check for directories (prefix match)
663 # now check for directories (prefix match)
649 if not path.endswith('/'):
664 if not path.endswith('/'):
650 path += '/'
665 path += '/'
651 for e in self.fncache:
666 for e in self.fncache:
652 if e.startswith(path) and self._exists(e):
667 if e.startswith(path) and self._exists(e):
653 return True
668 return True
654 return False
669 return False
@@ -1,543 +1,545 b''
1 #require repofncache
1 #require repofncache
2
2
3 An extension which will set fncache chunksize to 1 byte to make sure that logic
3 An extension which will set fncache chunksize to 1 byte to make sure that logic
4 does not break
4 does not break
5
5
6 $ cat > chunksize.py <<EOF
6 $ cat > chunksize.py <<EOF
7 > from __future__ import absolute_import
7 > from __future__ import absolute_import
8 > from mercurial import store
8 > from mercurial import store
9 > store.fncache_chunksize = 1
9 > store.fncache_chunksize = 1
10 > EOF
10 > EOF
11
11
12 $ cat >> $HGRCPATH <<EOF
12 $ cat >> $HGRCPATH <<EOF
13 > [extensions]
13 > [extensions]
14 > chunksize = $TESTTMP/chunksize.py
14 > chunksize = $TESTTMP/chunksize.py
15 > EOF
15 > EOF
16
16
17 Init repo1:
17 Init repo1:
18
18
19 $ hg init repo1
19 $ hg init repo1
20 $ cd repo1
20 $ cd repo1
21 $ echo "some text" > a
21 $ echo "some text" > a
22 $ hg add
22 $ hg add
23 adding a
23 adding a
24 $ hg ci -m first
24 $ hg ci -m first
25 $ cat .hg/store/fncache | sort
25 $ cat .hg/store/fncache | sort
26 data/a.i
26 data/a.i
27
27
28 Testing a.i/b:
28 Testing a.i/b:
29
29
30 $ mkdir a.i
30 $ mkdir a.i
31 $ echo "some other text" > a.i/b
31 $ echo "some other text" > a.i/b
32 $ hg add
32 $ hg add
33 adding a.i/b
33 adding a.i/b
34 $ hg ci -m second
34 $ hg ci -m second
35 $ cat .hg/store/fncache | sort
35 $ cat .hg/store/fncache | sort
36 data/a.i
36 data/a.i
37 data/a.i.hg/b.i
37 data/a.i.hg/b.i
38
38
39 Testing a.i.hg/c:
39 Testing a.i.hg/c:
40
40
41 $ mkdir a.i.hg
41 $ mkdir a.i.hg
42 $ echo "yet another text" > a.i.hg/c
42 $ echo "yet another text" > a.i.hg/c
43 $ hg add
43 $ hg add
44 adding a.i.hg/c
44 adding a.i.hg/c
45 $ hg ci -m third
45 $ hg ci -m third
46 $ cat .hg/store/fncache | sort
46 $ cat .hg/store/fncache | sort
47 data/a.i
47 data/a.i
48 data/a.i.hg.hg/c.i
48 data/a.i.hg.hg/c.i
49 data/a.i.hg/b.i
49 data/a.i.hg/b.i
50
50
51 Testing verify:
51 Testing verify:
52
52
53 $ hg verify
53 $ hg verify
54 checking changesets
54 checking changesets
55 checking manifests
55 checking manifests
56 crosschecking files in changesets and manifests
56 crosschecking files in changesets and manifests
57 checking files
57 checking files
58 checked 3 changesets with 3 changes to 3 files
58 checked 3 changesets with 3 changes to 3 files
59
59
60 $ rm .hg/store/fncache
60 $ rm .hg/store/fncache
61
61
62 $ hg verify
62 $ hg verify
63 checking changesets
63 checking changesets
64 checking manifests
64 checking manifests
65 crosschecking files in changesets and manifests
65 crosschecking files in changesets and manifests
66 checking files
66 checking files
67 warning: revlog 'data/a.i' not in fncache!
67 warning: revlog 'data/a.i' not in fncache!
68 warning: revlog 'data/a.i.hg/c.i' not in fncache!
68 warning: revlog 'data/a.i.hg/c.i' not in fncache!
69 warning: revlog 'data/a.i/b.i' not in fncache!
69 warning: revlog 'data/a.i/b.i' not in fncache!
70 checked 3 changesets with 3 changes to 3 files
70 checked 3 changesets with 3 changes to 3 files
71 3 warnings encountered!
71 3 warnings encountered!
72 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
72 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
73
73
74 Follow the hint to make sure it works
74 Follow the hint to make sure it works
75
75
76 $ hg debugrebuildfncache
76 $ hg debugrebuildfncache
77 adding data/a.i
77 adding data/a.i
78 adding data/a.i.hg/c.i
78 adding data/a.i.hg/c.i
79 adding data/a.i/b.i
79 adding data/a.i/b.i
80 3 items added, 0 removed from fncache
80 3 items added, 0 removed from fncache
81
81
82 $ hg verify
82 $ hg verify
83 checking changesets
83 checking changesets
84 checking manifests
84 checking manifests
85 crosschecking files in changesets and manifests
85 crosschecking files in changesets and manifests
86 checking files
86 checking files
87 checked 3 changesets with 3 changes to 3 files
87 checked 3 changesets with 3 changes to 3 files
88
88
89 $ cd ..
89 $ cd ..
90
90
91 Non store repo:
91 Non store repo:
92
92
93 $ hg --config format.usestore=False init foo
93 $ hg --config format.usestore=False init foo
94 $ cd foo
94 $ cd foo
95 $ mkdir tst.d
95 $ mkdir tst.d
96 $ echo foo > tst.d/foo
96 $ echo foo > tst.d/foo
97 $ hg ci -Amfoo
97 $ hg ci -Amfoo
98 adding tst.d/foo
98 adding tst.d/foo
99 $ find .hg | sort
99 $ find .hg | sort
100 .hg
100 .hg
101 .hg/00changelog.i
101 .hg/00changelog.i
102 .hg/00manifest.i
102 .hg/00manifest.i
103 .hg/cache
103 .hg/cache
104 .hg/cache/branch2-served
104 .hg/cache/branch2-served
105 .hg/cache/rbc-names-v1
105 .hg/cache/rbc-names-v1
106 .hg/cache/rbc-revs-v1
106 .hg/cache/rbc-revs-v1
107 .hg/data
107 .hg/data
108 .hg/data/tst.d.hg
108 .hg/data/tst.d.hg
109 .hg/data/tst.d.hg/foo.i
109 .hg/data/tst.d.hg/foo.i
110 .hg/dirstate
110 .hg/dirstate
111 .hg/fsmonitor.state (fsmonitor !)
111 .hg/fsmonitor.state (fsmonitor !)
112 .hg/last-message.txt
112 .hg/last-message.txt
113 .hg/phaseroots
113 .hg/phaseroots
114 .hg/requires
114 .hg/requires
115 .hg/undo
115 .hg/undo
116 .hg/undo.backup.dirstate
116 .hg/undo.backup.dirstate
117 .hg/undo.backupfiles
117 .hg/undo.backupfiles
118 .hg/undo.bookmarks
118 .hg/undo.bookmarks
119 .hg/undo.branch
119 .hg/undo.branch
120 .hg/undo.desc
120 .hg/undo.desc
121 .hg/undo.dirstate
121 .hg/undo.dirstate
122 .hg/undo.phaseroots
122 .hg/undo.phaseroots
123 .hg/wcache
123 .hg/wcache
124 .hg/wcache/checkisexec (execbit !)
124 .hg/wcache/checkisexec (execbit !)
125 .hg/wcache/checklink (symlink !)
125 .hg/wcache/checklink (symlink !)
126 .hg/wcache/checklink-target (symlink !)
126 .hg/wcache/checklink-target (symlink !)
127 .hg/wcache/manifestfulltextcache (reporevlogstore !)
127 .hg/wcache/manifestfulltextcache (reporevlogstore !)
128 $ cd ..
128 $ cd ..
129
129
130 Non fncache repo:
130 Non fncache repo:
131
131
132 $ hg --config format.usefncache=False init bar
132 $ hg --config format.usefncache=False init bar
133 $ cd bar
133 $ cd bar
134 $ mkdir tst.d
134 $ mkdir tst.d
135 $ echo foo > tst.d/Foo
135 $ echo foo > tst.d/Foo
136 $ hg ci -Amfoo
136 $ hg ci -Amfoo
137 adding tst.d/Foo
137 adding tst.d/Foo
138 $ find .hg | sort
138 $ find .hg | sort
139 .hg
139 .hg
140 .hg/00changelog.i
140 .hg/00changelog.i
141 .hg/cache
141 .hg/cache
142 .hg/cache/branch2-served
142 .hg/cache/branch2-served
143 .hg/cache/rbc-names-v1
143 .hg/cache/rbc-names-v1
144 .hg/cache/rbc-revs-v1
144 .hg/cache/rbc-revs-v1
145 .hg/dirstate
145 .hg/dirstate
146 .hg/fsmonitor.state (fsmonitor !)
146 .hg/fsmonitor.state (fsmonitor !)
147 .hg/last-message.txt
147 .hg/last-message.txt
148 .hg/requires
148 .hg/requires
149 .hg/store
149 .hg/store
150 .hg/store/00changelog.i
150 .hg/store/00changelog.i
151 .hg/store/00manifest.i
151 .hg/store/00manifest.i
152 .hg/store/data
152 .hg/store/data
153 .hg/store/data/tst.d.hg
153 .hg/store/data/tst.d.hg
154 .hg/store/data/tst.d.hg/_foo.i
154 .hg/store/data/tst.d.hg/_foo.i
155 .hg/store/phaseroots
155 .hg/store/phaseroots
156 .hg/store/undo
156 .hg/store/undo
157 .hg/store/undo.backupfiles
157 .hg/store/undo.backupfiles
158 .hg/store/undo.phaseroots
158 .hg/store/undo.phaseroots
159 .hg/undo.backup.dirstate
159 .hg/undo.backup.dirstate
160 .hg/undo.bookmarks
160 .hg/undo.bookmarks
161 .hg/undo.branch
161 .hg/undo.branch
162 .hg/undo.desc
162 .hg/undo.desc
163 .hg/undo.dirstate
163 .hg/undo.dirstate
164 .hg/wcache
164 .hg/wcache
165 .hg/wcache/checkisexec (execbit !)
165 .hg/wcache/checkisexec (execbit !)
166 .hg/wcache/checklink (symlink !)
166 .hg/wcache/checklink (symlink !)
167 .hg/wcache/checklink-target (symlink !)
167 .hg/wcache/checklink-target (symlink !)
168 .hg/wcache/manifestfulltextcache (reporevlogstore !)
168 .hg/wcache/manifestfulltextcache (reporevlogstore !)
169 $ cd ..
169 $ cd ..
170
170
171 Encoding of reserved / long paths in the store
171 Encoding of reserved / long paths in the store
172
172
173 $ hg init r2
173 $ hg init r2
174 $ cd r2
174 $ cd r2
175 $ cat <<EOF > .hg/hgrc
175 $ cat <<EOF > .hg/hgrc
176 > [ui]
176 > [ui]
177 > portablefilenames = ignore
177 > portablefilenames = ignore
178 > EOF
178 > EOF
179
179
180 $ hg import -q --bypass - <<EOF
180 $ hg import -q --bypass - <<EOF
181 > # HG changeset patch
181 > # HG changeset patch
182 > # User test
182 > # User test
183 > # Date 0 0
183 > # Date 0 0
184 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
184 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
185 > # Parent 0000000000000000000000000000000000000000
185 > # Parent 0000000000000000000000000000000000000000
186 > 1
186 > 1
187 >
187 >
188 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
188 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
189 > new file mode 100644
189 > new file mode 100644
190 > --- /dev/null
190 > --- /dev/null
191 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
191 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
192 > @@ -0,0 +1,1 @@
192 > @@ -0,0 +1,1 @@
193 > +foo
193 > +foo
194 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
194 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
195 > new file mode 100644
195 > new file mode 100644
196 > --- /dev/null
196 > --- /dev/null
197 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
197 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
198 > @@ -0,0 +1,1 @@
198 > @@ -0,0 +1,1 @@
199 > +foo
199 > +foo
200 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
200 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
201 > new file mode 100644
201 > new file mode 100644
202 > --- /dev/null
202 > --- /dev/null
203 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
203 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
204 > @@ -0,0 +1,1 @@
204 > @@ -0,0 +1,1 @@
205 > +foo
205 > +foo
206 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
206 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
207 > new file mode 100644
207 > new file mode 100644
208 > --- /dev/null
208 > --- /dev/null
209 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
209 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
210 > @@ -0,0 +1,1 @@
210 > @@ -0,0 +1,1 @@
211 > +foo
211 > +foo
212 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
212 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
213 > new file mode 100644
213 > new file mode 100644
214 > --- /dev/null
214 > --- /dev/null
215 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
215 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
216 > @@ -0,0 +1,1 @@
216 > @@ -0,0 +1,1 @@
217 > +foo
217 > +foo
218 > EOF
218 > EOF
219
219
220 $ find .hg/store -name *.i | sort
220 $ find .hg/store -name *.i | sort
221 .hg/store/00changelog.i
221 .hg/store/00changelog.i
222 .hg/store/00manifest.i
222 .hg/store/00manifest.i
223 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
223 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
224 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
224 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
225 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
225 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
226 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
226 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
227 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
227 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
228
228
229 $ cd ..
229 $ cd ..
230
230
231 Aborting lock does not prevent fncache writes
231 Aborting lock does not prevent fncache writes
232
232
233 $ cat > exceptionext.py <<EOF
233 $ cat > exceptionext.py <<EOF
234 > from __future__ import absolute_import
234 > from __future__ import absolute_import
235 > import os
235 > import os
236 > from mercurial import commands, error, extensions
236 > from mercurial import commands, error, extensions
237 >
237 >
238 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
238 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
239 > def releasewrap():
239 > def releasewrap():
240 > l.held = False # ensure __del__ is a noop
240 > l.held = False # ensure __del__ is a noop
241 > raise error.Abort("forced lock failure")
241 > raise error.Abort("forced lock failure")
242 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
242 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
243 > return l
243 > return l
244 >
244 >
245 > def reposetup(ui, repo):
245 > def reposetup(ui, repo):
246 > extensions.wrapfunction(repo, '_lock', lockexception)
246 > extensions.wrapfunction(repo, '_lock', lockexception)
247 >
247 >
248 > cmdtable = {}
248 > cmdtable = {}
249 >
249 >
250 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
250 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
251 > # at the end of dispatching (for intentional "forced lock failure")
251 > # at the end of dispatching (for intentional "forced lock failure")
252 > def commitwrap(orig, ui, repo, *pats, **opts):
252 > def commitwrap(orig, ui, repo, *pats, **opts):
253 > repo = repo.unfiltered() # to use replaced repo._lock certainly
253 > repo = repo.unfiltered() # to use replaced repo._lock certainly
254 > wlock = repo.wlock()
254 > wlock = repo.wlock()
255 > try:
255 > try:
256 > return orig(ui, repo, *pats, **opts)
256 > return orig(ui, repo, *pats, **opts)
257 > finally:
257 > finally:
258 > # multiple 'release()' calls are needed for complete releasing of wlock,
258 > # multiple 'release()' calls are needed for complete releasing of wlock,
259 > # because "forced" abort at last releasing store lock
259 > # because "forced" abort at last releasing store lock
260 > # prevents wlock from being released at same 'lockmod.release()'
260 > # prevents wlock from being released at same 'lockmod.release()'
261 > for i in range(wlock.held):
261 > for i in range(wlock.held):
262 > wlock.release()
262 > wlock.release()
263 >
263 >
264 > def extsetup(ui):
264 > def extsetup(ui):
265 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
265 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
266 > EOF
266 > EOF
267 $ extpath=`pwd`/exceptionext.py
267 $ extpath=`pwd`/exceptionext.py
268 $ hg init fncachetxn
268 $ hg init fncachetxn
269 $ cd fncachetxn
269 $ cd fncachetxn
270 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
270 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
271 $ touch y
271 $ touch y
272 $ hg ci -qAm y
272 $ hg ci -qAm y
273 abort: forced lock failure
273 abort: forced lock failure
274 [255]
274 [255]
275 $ cat .hg/store/fncache
275 $ cat .hg/store/fncache
276 data/y.i
276 data/y.i
277
277
278 Aborting transaction prevents fncache change
278 Aborting transaction prevents fncache change
279
279
280 $ cat > ../exceptionext.py <<EOF
280 $ cat > ../exceptionext.py <<EOF
281 > from __future__ import absolute_import
281 > from __future__ import absolute_import
282 > import os
282 > import os
283 > from mercurial import commands, error, extensions, localrepo
283 > from mercurial import commands, error, extensions, localrepo
284 >
284 >
285 > def wrapper(orig, self, *args, **kwargs):
285 > def wrapper(orig, self, *args, **kwargs):
286 > tr = orig(self, *args, **kwargs)
286 > tr = orig(self, *args, **kwargs)
287 > def fail(tr):
287 > def fail(tr):
288 > raise error.Abort(b"forced transaction failure")
288 > raise error.Abort(b"forced transaction failure")
289 > # zzz prefix to ensure it sorted after store.write
289 > # zzz prefix to ensure it sorted after store.write
290 > tr.addfinalize(b'zzz-forcefails', fail)
290 > tr.addfinalize(b'zzz-forcefails', fail)
291 > return tr
291 > return tr
292 >
292 >
293 > def uisetup(ui):
293 > def uisetup(ui):
294 > extensions.wrapfunction(
294 > extensions.wrapfunction(
295 > localrepo.localrepository, b'transaction', wrapper)
295 > localrepo.localrepository, b'transaction', wrapper)
296 >
296 >
297 > cmdtable = {}
297 > cmdtable = {}
298 >
298 >
299 > EOF
299 > EOF
300
300
301 Clean cached version
301 Clean cached version
302 $ rm -f "${extpath}c"
302 $ rm -f "${extpath}c"
303 $ rm -Rf "`dirname $extpath`/__pycache__"
303 $ rm -Rf "`dirname $extpath`/__pycache__"
304
304
305 $ touch z
305 $ touch z
306 $ hg ci -qAm z
306 $ hg ci -qAm z
307 transaction abort!
307 transaction abort!
308 rollback completed
308 rollback completed
309 abort: forced transaction failure
309 abort: forced transaction failure
310 [255]
310 [255]
311 $ cat .hg/store/fncache
311 $ cat .hg/store/fncache
312 data/y.i
312 data/y.i
313
313
314 Aborted transactions can be recovered later
314 Aborted transactions can be recovered later
315
315
316 $ cat > ../exceptionext.py <<EOF
316 $ cat > ../exceptionext.py <<EOF
317 > from __future__ import absolute_import
317 > from __future__ import absolute_import
318 > import os
318 > import os
319 > from mercurial import (
319 > from mercurial import (
320 > commands,
320 > commands,
321 > error,
321 > error,
322 > extensions,
322 > extensions,
323 > localrepo,
323 > localrepo,
324 > transaction,
324 > transaction,
325 > )
325 > )
326 >
326 >
327 > def trwrapper(orig, self, *args, **kwargs):
327 > def trwrapper(orig, self, *args, **kwargs):
328 > tr = orig(self, *args, **kwargs)
328 > tr = orig(self, *args, **kwargs)
329 > def fail(tr):
329 > def fail(tr):
330 > raise error.Abort(b"forced transaction failure")
330 > raise error.Abort(b"forced transaction failure")
331 > # zzz prefix to ensure it sorted after store.write
331 > # zzz prefix to ensure it sorted after store.write
332 > tr.addfinalize(b'zzz-forcefails', fail)
332 > tr.addfinalize(b'zzz-forcefails', fail)
333 > return tr
333 > return tr
334 >
334 >
335 > def abortwrapper(orig, self, *args, **kwargs):
335 > def abortwrapper(orig, self, *args, **kwargs):
336 > raise error.Abort(b"forced transaction failure")
336 > raise error.Abort(b"forced transaction failure")
337 >
337 >
338 > def uisetup(ui):
338 > def uisetup(ui):
339 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
339 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
340 > trwrapper)
340 > trwrapper)
341 > extensions.wrapfunction(transaction.transaction, '_abort',
341 > extensions.wrapfunction(transaction.transaction, '_abort',
342 > abortwrapper)
342 > abortwrapper)
343 >
343 >
344 > cmdtable = {}
344 > cmdtable = {}
345 >
345 >
346 > EOF
346 > EOF
347
347
348 Clean cached versions
348 Clean cached versions
349 $ rm -f "${extpath}c"
349 $ rm -f "${extpath}c"
350 $ rm -Rf "`dirname $extpath`/__pycache__"
350 $ rm -Rf "`dirname $extpath`/__pycache__"
351
351
352 $ hg up -q 1
352 $ hg up -q 1
353 $ touch z
353 $ touch z
354 $ hg ci -qAm z 2>/dev/null
354 $ hg ci -qAm z 2>/dev/null
355 [255]
355 [255]
356 $ cat .hg/store/fncache | sort
356 $ cat .hg/store/fncache | sort
357 data/y.i
357 data/y.i
358 data/z.i
358 data/z.i
359 $ hg recover
359 $ hg recover
360 rolling back interrupted transaction
360 rolling back interrupted transaction
361 checking changesets
361 checking changesets
362 checking manifests
362 checking manifests
363 crosschecking files in changesets and manifests
363 crosschecking files in changesets and manifests
364 checking files
364 checking files
365 checked 1 changesets with 1 changes to 1 files
365 checked 1 changesets with 1 changes to 1 files
366 $ cat .hg/store/fncache
366 $ cat .hg/store/fncache
367 data/y.i
367 data/y.i
368
368
369 $ cd ..
369 $ cd ..
370
370
371 debugrebuildfncache does nothing unless repo has fncache requirement
371 debugrebuildfncache does nothing unless repo has fncache requirement
372
372
373 $ hg --config format.usefncache=false init nofncache
373 $ hg --config format.usefncache=false init nofncache
374 $ cd nofncache
374 $ cd nofncache
375 $ hg debugrebuildfncache
375 $ hg debugrebuildfncache
376 (not rebuilding fncache because repository does not support fncache)
376 (not rebuilding fncache because repository does not support fncache)
377
377
378 $ cd ..
378 $ cd ..
379
379
380 debugrebuildfncache works on empty repository
380 debugrebuildfncache works on empty repository
381
381
382 $ hg init empty
382 $ hg init empty
383 $ cd empty
383 $ cd empty
384 $ hg debugrebuildfncache
384 $ hg debugrebuildfncache
385 fncache already up to date
385 fncache already up to date
386 $ cd ..
386 $ cd ..
387
387
388 debugrebuildfncache on an up to date repository no-ops
388 debugrebuildfncache on an up to date repository no-ops
389
389
390 $ hg init repo
390 $ hg init repo
391 $ cd repo
391 $ cd repo
392 $ echo initial > foo
392 $ echo initial > foo
393 $ echo initial > .bar
393 $ echo initial > .bar
394 $ hg commit -A -m initial
394 $ hg commit -A -m initial
395 adding .bar
395 adding .bar
396 adding foo
396 adding foo
397
397
398 $ cat .hg/store/fncache | sort
398 $ cat .hg/store/fncache | sort
399 data/.bar.i
399 data/.bar.i
400 data/foo.i
400 data/foo.i
401
401
402 $ hg debugrebuildfncache
402 $ hg debugrebuildfncache
403 fncache already up to date
403 fncache already up to date
404
404
405 debugrebuildfncache restores deleted fncache file
405 debugrebuildfncache restores deleted fncache file
406
406
407 $ rm -f .hg/store/fncache
407 $ rm -f .hg/store/fncache
408 $ hg debugrebuildfncache
408 $ hg debugrebuildfncache
409 adding data/.bar.i
409 adding data/.bar.i
410 adding data/foo.i
410 adding data/foo.i
411 2 items added, 0 removed from fncache
411 2 items added, 0 removed from fncache
412
412
413 $ cat .hg/store/fncache | sort
413 $ cat .hg/store/fncache | sort
414 data/.bar.i
414 data/.bar.i
415 data/foo.i
415 data/foo.i
416
416
417 Rebuild after rebuild should no-op
417 Rebuild after rebuild should no-op
418
418
419 $ hg debugrebuildfncache
419 $ hg debugrebuildfncache
420 fncache already up to date
420 fncache already up to date
421
421
422 A single missing file should get restored, an extra file should be removed
422 A single missing file should get restored, an extra file should be removed
423
423
424 $ cat > .hg/store/fncache << EOF
424 $ cat > .hg/store/fncache << EOF
425 > data/foo.i
425 > data/foo.i
426 > data/bad-entry.i
426 > data/bad-entry.i
427 > EOF
427 > EOF
428
428
429 $ hg debugrebuildfncache
429 $ hg debugrebuildfncache
430 removing data/bad-entry.i
430 removing data/bad-entry.i
431 adding data/.bar.i
431 adding data/.bar.i
432 1 items added, 1 removed from fncache
432 1 items added, 1 removed from fncache
433
433
434 $ cat .hg/store/fncache | sort
434 $ cat .hg/store/fncache | sort
435 data/.bar.i
435 data/.bar.i
436 data/foo.i
436 data/foo.i
437
437
438 debugrebuildfncache fails to recover from truncated line in fncache
438 debugrebuildfncache recovers from truncated line in fncache
439
439
440 $ printf a > .hg/store/fncache
440 $ printf a > .hg/store/fncache
441 $ hg debugrebuildfncache
441 $ hg debugrebuildfncache
442 abort: fncache does not ends with a newline
442 fncache does not ends with a newline
443 (use 'hg debugrebuildfncache' to rebuild the fncache)
443 adding data/.bar.i
444 [255]
444 adding data/foo.i
445 2 items added, 0 removed from fncache
445
446
446 $ cat .hg/store/fncache | sort
447 $ cat .hg/store/fncache | sort
447 a
448 data/.bar.i
449 data/foo.i
448
450
449 $ cd ..
451 $ cd ..
450
452
451 Try a simple variation without dotencode to ensure fncache is ignorant of encoding
453 Try a simple variation without dotencode to ensure fncache is ignorant of encoding
452
454
453 $ hg --config format.dotencode=false init nodotencode
455 $ hg --config format.dotencode=false init nodotencode
454 $ cd nodotencode
456 $ cd nodotencode
455 $ echo initial > foo
457 $ echo initial > foo
456 $ echo initial > .bar
458 $ echo initial > .bar
457 $ hg commit -A -m initial
459 $ hg commit -A -m initial
458 adding .bar
460 adding .bar
459 adding foo
461 adding foo
460
462
461 $ cat .hg/store/fncache | sort
463 $ cat .hg/store/fncache | sort
462 data/.bar.i
464 data/.bar.i
463 data/foo.i
465 data/foo.i
464
466
465 $ rm .hg/store/fncache
467 $ rm .hg/store/fncache
466 $ hg debugrebuildfncache
468 $ hg debugrebuildfncache
467 adding data/.bar.i
469 adding data/.bar.i
468 adding data/foo.i
470 adding data/foo.i
469 2 items added, 0 removed from fncache
471 2 items added, 0 removed from fncache
470
472
471 $ cat .hg/store/fncache | sort
473 $ cat .hg/store/fncache | sort
472 data/.bar.i
474 data/.bar.i
473 data/foo.i
475 data/foo.i
474
476
475 $ cd ..
477 $ cd ..
476
478
477 In repositories that have accumulated a large number of files over time, the
479 In repositories that have accumulated a large number of files over time, the
478 fncache file is going to be large. If we possibly can avoid loading it, so much the better.
480 fncache file is going to be large. If we possibly can avoid loading it, so much the better.
479 The cache should not loaded when committing changes to existing files, or when unbundling
481 The cache should not loaded when committing changes to existing files, or when unbundling
480 changesets that only contain changes to existing files:
482 changesets that only contain changes to existing files:
481
483
482 $ cat > fncacheloadwarn.py << EOF
484 $ cat > fncacheloadwarn.py << EOF
483 > from __future__ import absolute_import
485 > from __future__ import absolute_import
484 > from mercurial import extensions, localrepo
486 > from mercurial import extensions, localrepo
485 >
487 >
486 > def extsetup(ui):
488 > def extsetup(ui):
487 > def wrapstore(orig, requirements, *args):
489 > def wrapstore(orig, requirements, *args):
488 > store = orig(requirements, *args)
490 > store = orig(requirements, *args)
489 > if b'store' in requirements and b'fncache' in requirements:
491 > if b'store' in requirements and b'fncache' in requirements:
490 > instrumentfncachestore(store, ui)
492 > instrumentfncachestore(store, ui)
491 > return store
493 > return store
492 > extensions.wrapfunction(localrepo, 'makestore', wrapstore)
494 > extensions.wrapfunction(localrepo, 'makestore', wrapstore)
493 >
495 >
494 > def instrumentfncachestore(fncachestore, ui):
496 > def instrumentfncachestore(fncachestore, ui):
495 > class instrumentedfncache(type(fncachestore.fncache)):
497 > class instrumentedfncache(type(fncachestore.fncache)):
496 > def _load(self):
498 > def _load(self):
497 > ui.warn(b'fncache load triggered!\n')
499 > ui.warn(b'fncache load triggered!\n')
498 > super(instrumentedfncache, self)._load()
500 > super(instrumentedfncache, self)._load()
499 > fncachestore.fncache.__class__ = instrumentedfncache
501 > fncachestore.fncache.__class__ = instrumentedfncache
500 > EOF
502 > EOF
501
503
502 $ fncachextpath=`pwd`/fncacheloadwarn.py
504 $ fncachextpath=`pwd`/fncacheloadwarn.py
503 $ hg init nofncacheload
505 $ hg init nofncacheload
504 $ cd nofncacheload
506 $ cd nofncacheload
505 $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc
507 $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc
506
508
507 A new file should trigger a load, as we'd want to update the fncache set in that case:
509 A new file should trigger a load, as we'd want to update the fncache set in that case:
508
510
509 $ touch foo
511 $ touch foo
510 $ hg ci -qAm foo
512 $ hg ci -qAm foo
511 fncache load triggered!
513 fncache load triggered!
512
514
513 But modifying that file should not:
515 But modifying that file should not:
514
516
515 $ echo bar >> foo
517 $ echo bar >> foo
516 $ hg ci -qm foo
518 $ hg ci -qm foo
517
519
518 If a transaction has been aborted, the zero-size truncated index file will
520 If a transaction has been aborted, the zero-size truncated index file will
519 not prevent the fncache from being loaded; rather than actually abort
521 not prevent the fncache from being loaded; rather than actually abort
520 a transaction, we simulate the situation by creating a zero-size index file:
522 a transaction, we simulate the situation by creating a zero-size index file:
521
523
522 $ touch .hg/store/data/bar.i
524 $ touch .hg/store/data/bar.i
523 $ touch bar
525 $ touch bar
524 $ hg ci -qAm bar
526 $ hg ci -qAm bar
525 fncache load triggered!
527 fncache load triggered!
526
528
527 Unbundling should follow the same rules; existing files should not cause a load:
529 Unbundling should follow the same rules; existing files should not cause a load:
528
530
529 $ hg clone -q . tobundle
531 $ hg clone -q . tobundle
530 $ echo 'new line' > tobundle/bar
532 $ echo 'new line' > tobundle/bar
531 $ hg -R tobundle ci -qm bar
533 $ hg -R tobundle ci -qm bar
532 $ hg -R tobundle bundle -q barupdated.hg
534 $ hg -R tobundle bundle -q barupdated.hg
533 $ hg unbundle -q barupdated.hg
535 $ hg unbundle -q barupdated.hg
534
536
535 but adding new files should:
537 but adding new files should:
536
538
537 $ touch tobundle/newfile
539 $ touch tobundle/newfile
538 $ hg -R tobundle ci -qAm newfile
540 $ hg -R tobundle ci -qAm newfile
539 $ hg -R tobundle bundle -q newfile.hg
541 $ hg -R tobundle bundle -q newfile.hg
540 $ hg unbundle -q newfile.hg
542 $ hg unbundle -q newfile.hg
541 fncache load triggered!
543 fncache load triggered!
542
544
543 $ cd ..
545 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now