narrow: detect if narrowspec was changed in a different share...
Martin von Zweigbergk
r41072:ce0bc295 default
@@ -1,24 +1,21 @@
-Integration with the share extension needs improvement. Right now
-we've seen some odd bugs.
-
 Address commentary in manifest.excludedmanifestrevlog.add -
 specifically we should improve the collaboration with core so that
 add() never gets called on an excluded directory and we can improve
 the stand-in to raise a ProgrammingError.
 
 Reason more completely about rename-filtering logic in
 narrowfilelog. There could be some surprises lurking there.
 
 Formally document the narrowspec format. For bonus points, unify with the
 server-specified narrowspec format.
 
 narrowrepo.setnarrowpats() or narrowspec.save() need to make sure
 they're holding the wlock.
 
 The following places do an unrestricted dirstate walk (including files
 outside the narrowspec); some of them should perhaps not do that (see the
 sketch after this list):
 
 * debugfileset
 * perfwalk
 * sparse (but restricted to sparse config)
 * largefiles
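
For the TODO item above, a minimal sketch of what a narrowspec-restricted
dirstate walk could look like. It assumes a repo.narrowmatch() helper (as in
core Mercurial of this era) and a hypothetical process() callback; the actual
fixes for the listed call sites are not part of this change:

    # sketch only: restrict a dirstate walk to the narrowspec instead of
    # visiting every tracked file
    match = repo.narrowmatch()  # assumed: narrow matcher, or an always-matcher
    for f in repo.dirstate.walk(match, subrepos=[], unknown=False,
                                ignored=False):
        process(f)  # hypothetical per-file handler
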
@@ -1,472 +1,481 @@
 # narrowcommands.py - command modifications for narrowhg extension
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 
 import itertools
 import os
 
 from mercurial.i18n import _
 from mercurial import (
     bundle2,
     cmdutil,
     commands,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     hg,
     merge,
     narrowspec,
     node,
     pycompat,
     registrar,
     repair,
     repository,
     repoview,
     sparse,
     util,
     wireprototypes,
 )
 
 table = {}
 command = registrar.command(table)
 
 def setup():
     """Wraps user-facing mercurial commands with narrow-aware versions."""
 
     entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
     entry[1].append(('', 'narrow', None,
                      _("create a narrow clone of select files")))
     entry[1].append(('', 'depth', '',
                      _("limit the history fetched by distance from heads")))
     entry[1].append(('', 'narrowspec', '',
                      _("read narrowspecs from file")))
     # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
     if 'sparse' not in extensions.enabled():
         entry[1].append(('', 'include', [],
                          _("specifically fetch this file/directory")))
         entry[1].append(
             ('', 'exclude', [],
              _("do not fetch this file/directory, even if included")))
 
     entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
     entry[1].append(('', 'depth', '',
                      _("limit the history fetched by distance from heads")))
 
     extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
 
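An aside on the wrapping pattern in setup(): extensions.wrapcommand() returns
the command-table entry, whose second element is the list of
(shortflag, longflag, default, help) options, which is why new flags can be
bolted onto clone and pull by appending to entry[1]. A minimal sketch of the
same pattern in a standalone extension (the command wrapper and the 'shout'
flag are illustrative, not part of this change):

    from mercurial import commands, extensions

    def wrappedstatus(orig, ui, repo, *args, **opts):
        # runs around the original 'hg status'; 'shout' is a made-up flag
        if opts.get('shout'):
            ui.status(b'status was called\n')
        return orig(ui, repo, *args, **opts)

    def uisetup(ui):
        # wrapcommand() returns the table entry; entry[1] is the options list
        entry = extensions.wrapcommand(commands.table, 'status', wrappedstatus)
        entry[1].append(('', 'shout', None, 'announce each status invocation'))
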
 def clonenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
     opts = pycompat.byteskwargs(opts)
     wrappedextraprepare = util.nullcontextmanager()
     narrowspecfile = opts['narrowspec']
 
     if narrowspecfile:
         filepath = os.path.join(encoding.getcwd(), narrowspecfile)
         ui.status(_("reading narrowspec from '%s'\n") % filepath)
         try:
             fdata = util.readfile(filepath)
         except IOError as inst:
             raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
                               (filepath, encoding.strtolocal(inst.strerror)))
 
         includes, excludes, profiles = sparse.parseconfig(ui, fdata, 'narrow')
         if profiles:
             raise error.Abort(_("cannot specify other files using '%include' in"
                                 " narrowspec"))
 
         narrowspec.validatepatterns(includes)
         narrowspec.validatepatterns(excludes)
 
         # narrowspec is passed so we should assume that user wants narrow clone
         opts['narrow'] = True
         opts['include'].extend(includes)
         opts['exclude'].extend(excludes)
 
     if opts['narrow']:
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
 
             if opts.get('depth'):
                 kwargs['depth'] = opts['depth']
         wrappedextraprepare = extensions.wrappedfunction(exchange,
             '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
 
     with wrappedextraprepare:
         return orig(ui, repo, *args, **pycompat.strkwargs(opts))
 
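As an illustration of the --narrowspec file read above (an assumed example,
not from this change): the file uses the sparse-style config format accepted
by sparse.parseconfig(ui, fdata, 'narrow'), every pattern must survive
narrowspec.validatepatterns() (i.e. carry a 'path:' or 'rootfilesin:' prefix),
and '%include' lines are rejected:

    [include]
    path:src/lib
    rootfilesin:docs
    [exclude]
    path:src/lib/tests
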
 def pullnarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps pull command to allow modifying narrow spec."""
     wrappedextraprepare = util.nullcontextmanager()
     if repository.NARROW_REQUIREMENT in repo.requirements:
 
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
             if opts.get(r'depth'):
                 kwargs['depth'] = opts[r'depth']
         wrappedextraprepare = extensions.wrappedfunction(exchange,
             '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
 
     with wrappedextraprepare:
         return orig(ui, repo, *args, **opts)
 
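A note on the pattern shared by both wrappers above: unlike
extensions.wrapfunction(), extensions.wrappedfunction() returns a context
manager, so the monkey-patch is only in effect inside the with block, and
util.nullcontextmanager() keeps the non-narrow path uniform. Roughly (a
sketch of the flow, not new behavior):

    # equivalent to the 'with wrappedextraprepare:' blocks above
    wrapped = extensions.wrappedfunction(exchange, '_pullbundle2extraprepare',
                                         pullbundle2extraprepare_widen)
    with wrapped:
        result = orig(ui, repo, *args, **opts)  # wrapper active only here
    # on exit, the original exchange._pullbundle2extraprepare is restored
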
 def archivenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps archive command to narrow the default includes."""
     if repository.NARROW_REQUIREMENT in repo.requirements:
         repo_includes, repo_excludes = repo.narrowpats
         includes = set(opts.get(r'include', []))
         excludes = set(opts.get(r'exclude', []))
         includes, excludes, unused_invalid = narrowspec.restrictpatterns(
             includes, excludes, repo_includes, repo_excludes)
         if includes:
             opts[r'include'] = includes
         if excludes:
             opts[r'exclude'] = excludes
     return orig(ui, repo, *args, **opts)
 
 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)
 
     if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
         raise error.Abort(_("server does not support narrow clones"))
     orig(pullop, kwargs)
     kwargs['narrow'] = True
     include, exclude = repo.narrowpats
     kwargs['oldincludepats'] = include
     kwargs['oldexcludepats'] = exclude
     if include:
         kwargs['includepats'] = include
     if exclude:
         kwargs['excludepats'] = exclude
     # calculate known nodes only in ellipses cases because in non-ellipses cases
     # we have all the nodes
     if wireprototypes.ELLIPSESCAP in pullop.remote.capabilities():
         kwargs['known'] = [node.hex(ctx.node()) for ctx in
                            repo.set('::%ln', pullop.common)
                            if ctx.node() != node.nullid]
         if not kwargs['known']:
             # Mercurial serializes an empty list as '' and deserializes it as
             # [''], so delete it instead to avoid handling the empty string on
             # the server.
             del kwargs['known']
 
 extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
                         pullbundle2extraprepare)
 
 # This is an extension point for filesystems that need to do something other
 # than just blindly unlink the files. It's not clear what arguments would be
 # useful, so we're passing in a fair number of them, some of them redundant.
 def _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes, newexcludes,
                        oldmatch, newmatch):
     for f in repo.dirstate:
         if not newmatch(f):
             repo.dirstate.drop(f)
             repo.wvfs.unlinkpath(f)
 
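Given the extension-point comment above, a filesystem-backed extension could
replace the blind unlink roughly like this. This is a sketch under the
assumption that the module is importable as hgext.narrow.narrowcommands; the
wrapper name and the custom removal strategy are hypothetical:

    from mercurial import extensions
    from hgext.narrow import narrowcommands

    def lazycleanupwdir(orig, repo, oldincludes, oldexcludes,
                        newincludes, newexcludes, oldmatch, newmatch):
        for f in repo.dirstate:
            if not newmatch(f):
                repo.dirstate.drop(f)
                # e.g. hand f to a virtual filesystem layer here instead of
                # calling repo.wvfs.unlinkpath(f)

    def uisetup(ui):
        extensions.wrapfunction(narrowcommands, '_narrowcleanupwdir',
                                lazycleanupwdir)
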
 def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
             newincludes, newexcludes, force):
     oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
     newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
 
     # This is essentially doing "hg outgoing" to find all local-only
     # commits. We will then check that the local-only commits don't
     # have any changes to files that will be untracked.
     unfi = repo.unfiltered()
     outgoing = discovery.findcommonoutgoing(unfi, remote,
                                             commoninc=commoninc)
     ui.status(_('looking for local changes to affected paths\n'))
     localnodes = []
     for n in itertools.chain(outgoing.missing, outgoing.excluded):
         if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
             localnodes.append(n)
     revstostrip = unfi.revs('descendants(%ln)', localnodes)
     hiddenrevs = repoview.filterrevs(repo, 'visible')
     visibletostrip = list(repo.changelog.node(r)
                           for r in (revstostrip - hiddenrevs))
     if visibletostrip:
         ui.status(_('The following changeset(s) or their ancestors have '
                     'local changes not on the remote:\n'))
         maxnodes = 10
         if ui.verbose or len(visibletostrip) <= maxnodes:
             for n in visibletostrip:
                 ui.status('%s\n' % node.short(n))
         else:
             for n in visibletostrip[:maxnodes]:
                 ui.status('%s\n' % node.short(n))
             ui.status(_('...and %d more, use --verbose to list all\n') %
                       (len(visibletostrip) - maxnodes))
         if not force:
             raise error.Abort(_('local changes found'),
                               hint=_('use --force-delete-local-changes to '
                                      'ignore'))
 
     with ui.uninterruptable():
         if revstostrip:
             tostrip = [unfi.changelog.node(r) for r in revstostrip]
             if repo['.'].node() in tostrip:
                 # stripping working copy, so move to a different commit first
                 urev = max(repo.revs('(::%n) - %ln + null',
                                      repo['.'].node(), visibletostrip))
                 hg.clean(repo, urev)
             overrides = {('devel', 'strip-obsmarkers'): False}
             with ui.configoverride(overrides, 'narrow'):
                 repair.strip(ui, unfi, tostrip, topic='narrow')
 
         todelete = []
         for f, f2, size in repo.store.datafiles():
             if f.startswith('data/'):
                 file = f[5:-2]
                 if not newmatch(file):
                     todelete.append(f)
             elif f.startswith('meta/'):
                 dir = f[5:-13]
                 dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
                 include = True
                 for d in dirs:
                     visit = newmatch.visitdir(d)
                     if not visit:
                         include = False
                         break
                     if visit == 'all':
                         break
                 if not include:
                     todelete.append(f)
 
         repo.destroying()
 
         with repo.transaction("narrowing"):
             # Update narrowspec before removing revlogs, so repo won't be
             # corrupt in case of crash
             repo.setnarrowpats(newincludes, newexcludes)
 
             for f in todelete:
                 ui.status(_('deleting %s\n') % f)
                 util.unlinkpath(repo.svfs.join(f))
                 repo.store.markremoved(f)
 
             _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes,
                                newexcludes, oldmatch, newmatch)
 
         repo.destroyed()
 
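The magic slices in the todelete loop above lean on the store's file layout.
A quick sketch of why they work, under the layout this code assumes
('data/<path>.i' or '.d' for filelogs, 'meta/<dir>/00manifest.i' for tree
manifests):

    # data/<path>.i            -> f[5:-2]  strips 'data/' and '.i' (or '.d')
    # meta/<dir>/00manifest.i  -> f[5:-13] strips 'meta/' and '/00manifest.i'
    assert len('/00manifest.i') == 13
    assert 'data/foo/bar.txt.i'[5:-2] == 'foo/bar.txt'
    assert 'meta/foo/bar/00manifest.i'[5:-13] == 'foo/bar'
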
 def _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
            newincludes, newexcludes):
     newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
 
     # for now we assume that if a server has ellipses enabled, we will be
     # exchanging ellipses nodes. In future we should add ellipses as a client
     # side requirement (maybe) to distinguish a client is shallow or not and
     # then send that information to server whether we want ellipses or not.
     # Theoretically a non-ellipses repo should be able to use narrow
     # functionality from an ellipses enabled server
     ellipsesremote = wireprototypes.ELLIPSESCAP in remote.capabilities()
 
     def pullbundle2extraprepare_widen(orig, pullop, kwargs):
         orig(pullop, kwargs)
         # The old{in,ex}cludepats have already been set by orig()
         kwargs['includepats'] = newincludes
         kwargs['excludepats'] = newexcludes
     wrappedextraprepare = extensions.wrappedfunction(exchange,
         '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
 
     # define a function that narrowbundle2 can call after creating the
     # backup bundle, but before applying the bundle from the server
     def setnewnarrowpats():
         repo.setnarrowpats(newincludes, newexcludes)
     repo.setnewnarrowpats = setnewnarrowpats
     # silence the devel-warning of applying an empty changegroup
     overrides = {('devel', 'all-warnings'): False}
 
     with ui.uninterruptable():
         common = commoninc[0]
         if ellipsesremote:
             ds = repo.dirstate
             p1, p2 = ds.p1(), ds.p2()
             with ds.parentchange():
                 ds.setparents(node.nullid, node.nullid)
             with wrappedextraprepare,\
                  repo.ui.configoverride(overrides, 'widen'):
                 exchange.pull(repo, remote, heads=common)
             with ds.parentchange():
                 ds.setparents(p1, p2)
         else:
             with remote.commandexecutor() as e:
                 bundle = e.callcommand('narrow_widen', {
                     'oldincludes': oldincludes,
                     'oldexcludes': oldexcludes,
                     'newincludes': newincludes,
                     'newexcludes': newexcludes,
                     'cgversion': '03',
                     'commonheads': common,
                     'known': [],
                     'ellipses': False,
                 }).result()
 
             with repo.transaction('widening') as tr,\
                  repo.ui.configoverride(overrides, 'widen'):
                 tgetter = lambda: tr
                 bundle2.processbundle(repo, bundle,
                                       transactiongetter=tgetter)
 
         repo.setnewnarrowpats()
         actions = merge.emptyactions()
         addgaction = actions['g'].append
 
         mf = repo['.'].manifest().matches(newmatch)
         for f, fn in mf.iteritems():
             if f not in repo.dirstate:
                 addgaction((f, (mf.flags(f), False),
                             "add from widened narrow clone"))
 
         merge.applyupdates(repo, actions, wctx=repo[None],
                            mctx=repo['.'], overwrite=False)
         merge.recordupdates(repo, actions, branchmerge=False)
 
 # TODO(rdamazio): Make new matcher format and update description
 @command('tracked',
     [('', 'addinclude', [], _('new paths to include')),
      ('', 'removeinclude', [], _('old paths to no longer include')),
      ('', 'addexclude', [], _('new paths to exclude')),
      ('', 'import-rules', '', _('import narrowspecs from a file')),
      ('', 'removeexclude', [], _('old paths to no longer exclude')),
      ('', 'clear', False, _('whether to replace the existing narrowspec')),
      ('', 'force-delete-local-changes', False,
       _('forces deletion of local changes when narrowing')),
+     ('', 'update-working-copy', False,
+      _('update working copy when the store has changed')),
     ] + commands.remoteopts,
     _('[OPTIONS]... [REMOTE]'),
     inferrepo=True)
 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
     """show or change the current narrowspec
 
     With no argument, shows the current narrowspec entries, one per line. Each
     line will be prefixed with 'I' or 'X' for included or excluded patterns,
     respectively.
 
     The narrowspec consists of expressions to match remote files and/or
     directories that should be pulled into your client.
     The narrowspec has *include* and *exclude* expressions, with excludes always
     trumping includes: that is, if a file matches an exclude expression, it will
     be excluded even if it also matches an include expression.
     Excluding files that were never included has no effect.
 
     Each included or excluded entry is in the format described by
     'hg help patterns'.
 
     The options allow you to add or remove included and excluded expressions.
 
     If --clear is specified, then all previous includes and excludes are DROPPED
     and replaced by the new ones specified to --addinclude and --addexclude.
     If --clear is specified without any further options, the narrowspec will be
     empty and will not match any files.
     """
     opts = pycompat.byteskwargs(opts)
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         raise error.Abort(_('the narrow command is only supported on '
                             'repositories cloned with --narrow'))
 
     # Before supporting, decide whether "hg tracked --clear" should mean
     # tracking no paths or all paths.
     if opts['clear']:
         raise error.Abort(_('the --clear option is not yet supported'))
 
     # import rules from a file
     newrules = opts.get('import_rules')
     if newrules:
         try:
             filepath = os.path.join(encoding.getcwd(), newrules)
             fdata = util.readfile(filepath)
         except IOError as inst:
             raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
                               (filepath, encoding.strtolocal(inst.strerror)))
         includepats, excludepats, profiles = sparse.parseconfig(ui, fdata,
                                                                 'narrow')
         if profiles:
             raise error.Abort(_("including other spec files using '%include' "
                                 "is not supported in narrowspec"))
         opts['addinclude'].extend(includepats)
         opts['addexclude'].extend(excludepats)
 
     addedincludes = narrowspec.parsepatterns(opts['addinclude'])
     removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
     addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
     removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
 
+    update_working_copy = opts['update_working_copy']
     only_show = not (addedincludes or removedincludes or addedexcludes or
-                     removedexcludes or newrules)
+                     removedexcludes or newrules or update_working_copy)
 
     oldincludes, oldexcludes = repo.narrowpats
 
     # filter the user passed additions and deletions into actual additions and
     # deletions of excludes and includes
     addedincludes -= oldincludes
     removedincludes &= oldincludes
     addedexcludes -= oldexcludes
     removedexcludes &= oldexcludes
 
     widening = addedincludes or removedexcludes
     narrowing = removedincludes or addedexcludes
 
     # Only print the current narrowspec.
     if only_show:
         ui.pager('tracked')
         fm = ui.formatter('narrow', opts)
         for i in sorted(oldincludes):
             fm.startitem()
             fm.write('status', '%s ', 'I', label='narrow.included')
             fm.write('pat', '%s\n', i, label='narrow.included')
         for i in sorted(oldexcludes):
             fm.startitem()
             fm.write('status', '%s ', 'X', label='narrow.excluded')
             fm.write('pat', '%s\n', i, label='narrow.excluded')
         fm.end()
         return 0
 
+    if update_working_copy:
+        with repo.wlock(), repo.lock(), repo.transaction('narrow-wc') as tr:
+            narrowspec.updateworkingcopy(repo, tr)
+            narrowspec.copytoworkingcopy(repo, tr)
+        return 0
+
     if not widening and not narrowing:
         ui.status(_("nothing to widen or narrow\n"))
         return 0
 
     with repo.wlock(), repo.lock():
         cmdutil.bailifchanged(repo)
 
         # Find the revisions we have in common with the remote. These will
         # be used for finding local-only changes for narrowing. They will
         # also define the set of revisions to update for widening.
         remotepath = ui.expandpath(remotepath or 'default')
         url, branches = hg.parseurl(remotepath)
         ui.status(_('comparing with %s\n') % util.hidepassword(url))
         remote = hg.peer(repo, opts, url)
 
         # check narrow support before doing anything if widening needs to be
         # performed. In future we should also abort if client is ellipses and
         # server does not support ellipses
         if widening and wireprototypes.NARROWCAP not in remote.capabilities():
             raise error.Abort(_("server does not support narrow clones"))
 
         commoninc = discovery.findcommonincoming(repo, remote)
 
         if narrowing:
             newincludes = oldincludes - removedincludes
             newexcludes = oldexcludes | addedexcludes
             _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                     newincludes, newexcludes,
                     opts['force_delete_local_changes'])
             # _narrow() updated the narrowspec and _widen() below needs to
             # use the updated values as its base (otherwise removed includes
             # and addedexcludes will be lost in the resulting narrowspec)
             oldincludes = newincludes
             oldexcludes = newexcludes
 
         if widening:
             newincludes = oldincludes | addedincludes
             newexcludes = oldexcludes - removedexcludes
             _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                    newincludes, newexcludes)
 
     return 0
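
Putting the new flag in context, a plausible session with this change applied
(the paths and remote name are illustrative):

    $ hg tracked                          # show narrowspec, 'I'/'X' prefixed
    $ hg tracked --addinclude path:src/lib default
    $ hg tracked --update-working-copy    # sync the working copy after another
                                          # share changed the store's narrowspec
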
@@ -1,1225 +1,1229 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import hashlib
 import os
 import shutil
 import stat
 
 from .i18n import _
 from .node import (
     nullid,
 )
 
 from . import (
     bookmarks,
     bundlerepo,
     cacheutil,
     cmdutil,
     destutil,
     discovery,
     error,
     exchange,
     extensions,
     httppeer,
     localrepo,
     lock,
     logcmdutil,
     logexchange,
     merge as mergemod,
     narrowspec,
     node,
     phases,
+    repository as repositorymod,
     scmutil,
     sshpeer,
     statichttprepo,
     ui as uimod,
     unionrepo,
     url,
     util,
     verify as verifymod,
     vfs as vfsmod,
 )
 
 release = lock.release
 
 # shared features
 sharedbookmarks = 'bookmarks'
 
 def _local(path):
     path = util.expandpath(util.urllocalpath(path))
     return (os.path.isfile(path) and bundlerepo or localrepo)
 
 def addbranchrevs(lrepo, other, branches, revs):
     peer = other.peer() # a courtesy to callers using a localrepo for other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         x = revs or None
         if revs:
             y = revs[0]
         else:
             y = None
         return x, y
     if revs:
         revs = list(revs)
     else:
         revs = []
 
     if not peer.capable('branchmap'):
         if branches:
             raise error.Abort(_("remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]
 
     with peer.commandexecutor() as e:
         branchmap = e.callcommand('branchmap', {}).result()
 
     def primary(branch):
         if branch == '.':
             if not lrepo:
                 raise error.Abort(_("dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
             return True
         else:
             return False
 
     for branch in branches:
         if not primary(branch):
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]
 
 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''
 
     u = util.url(path)
     branch = None
     if u.fragment:
         branch = u.fragment
         u.fragment = None
     return bytes(u), (branch, branches or [])
 
 schemes = {
     'bundle': bundlerepo,
     'union': unionrepo,
     'file': _local,
     'http': httppeer,
     'https': httppeer,
     'ssh': sshpeer,
     'static-http': statichttprepo,
 }
 
 def _peerlookup(path):
     u = util.url(path)
     scheme = u.scheme or 'file'
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         # we can't test callable(thing) because 'thing' can be an unloaded
         # module that implements __call__
         if not util.safehasattr(thing, 'instance'):
             raise
         return thing
 
 def islocal(repo):
     '''return true if repo (or path pointing to repo) is local'''
     if isinstance(repo, bytes):
         try:
             return _peerlookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()
 
 def openpath(ui, path):
     '''open path with open if local, url.open if remote'''
     pathurl = util.url(path, parsequery=False, parsefragment=False)
     if pathurl.islocal():
         return util.posixfile(pathurl.localpath(), 'rb')
     else:
         return url.open(ui, path)
 
 # a list of (ui, repo) functions called for wire peer initialization
 wirepeersetupfuncs = []
 
 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                 intents=None, createopts=None):
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                      createopts=createopts)
     ui = getattr(obj, "ui", ui)
     for f in presetupfuncs or []:
         f(ui, obj)
     ui.log(b'extension', b'- executing reposetup hooks\n')
     with util.timedcm('all reposetup') as allreposetupstats:
         for name, module in extensions.extensions(ui):
             ui.log(b'extension', b' - running reposetup for %s\n', name)
             hook = getattr(module, 'reposetup', None)
             if hook:
                 with util.timedcm('reposetup %r', name) as stats:
                     hook(ui, obj)
                 ui.log(b'extension', b' > reposetup for %s took %s\n',
                        name, stats)
     ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
     return obj
 
 def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
                createopts=None):
     """return a repository object for the specified path"""
     peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                        intents=intents, createopts=createopts)
     repo = peer.local()
     if not repo:
         raise error.Abort(_("repository '%s' is not local") %
                           (path or peer.url()))
     return repo.filtered('visible')
 
 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
     return _peerorrepo(rui, path, create, intents=intents,
                        createopts=createopts).peer()
 
 def defaultdest(source):
     '''return default destination of clone if none is given
 
     >>> defaultdest(b'foo')
     'foo'
     >>> defaultdest(b'/foo/bar')
     'bar'
     >>> defaultdest(b'/')
     ''
     >>> defaultdest(b'')
     ''
     >>> defaultdest(b'http://example.org/')
     ''
     >>> defaultdest(b'http://example.org/foo/')
     'foo'
     '''
     path = util.url(source).path
     if not path:
         return ''
     return os.path.basename(os.path.normpath(path))
 
 def sharedreposource(repo):
     """Returns repository object for source repository of a shared repo.
 
     If repo is not a shared repository, returns None.
     """
     if repo.sharedpath == repo.path:
         return None
 
     if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
         return repo.srcrepo
 
     # the sharedpath always ends in the .hg; we want the path to the repo
     source = repo.vfs.split(repo.sharedpath)[0]
     srcurl, branches = parseurl(source)
     srcrepo = repository(repo.ui, srcurl)
     repo.srcrepo = srcrepo
     return srcrepo
 
 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
           relative=False):
     '''create a shared repository'''
 
     if not islocal(source):
         raise error.Abort(_('can only share local repositories'))
 
     if not dest:
         dest = defaultdest(source)
     else:
         dest = ui.expandpath(dest)
 
     if isinstance(source, bytes):
         origsource = ui.expandpath(source)
         source, branches = parseurl(origsource)
         srcrepo = repository(ui, source)
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
         checkout = None
 
     shareditems = set()
     if bookmarks:
         shareditems.add(sharedbookmarks)
 
     r = repository(ui, dest, create=True, createopts={
         'sharedrepo': srcrepo,
         'sharedrelative': relative,
         'shareditems': shareditems,
     })
 
     postshare(srcrepo, r, defaultpath=defaultpath)
     r = repository(ui, dest)
     _postshareupdate(r, update, checkout=checkout)
     return r
 
 def unshare(ui, repo):
     """convert a shared repository to a normal one
 
     Copy the store data to the repo and remove the sharedpath data.
 
     Returns a new repository object representing the unshared repository.
 
     The passed repository object is not usable after this function is
     called.
     """
 
     destlock = lock = None
     lock = repo.lock()
     try:
         # we use locks here because if we race with commit, we
         # can end up with extra data in the cloned revlogs that's
         # not pointed to by changesets, thus causing verify to
         # fail
 
         destlock = copystore(ui, repo, repo.path)
 
         sharefile = repo.vfs.join('sharedpath')
         util.rename(sharefile, sharefile + '.old')
 
         repo.requirements.discard('shared')
         repo.requirements.discard('relshared')
         repo._writerequirements()
     finally:
         destlock and destlock.release()
         lock and lock.release()
 
     # Removing share changes some fundamental properties of the repo instance.
     # So we instantiate a new repo object and operate on it rather than
     # try to keep the existing repo usable.
     newrepo = repository(repo.baseui, repo.root, create=False)
 
     # TODO: figure out how to access subrepos that exist, but were previously
     # removed from .hgsub
     c = newrepo['.']
     subs = c.substate
     for s in sorted(subs):
         c.sub(s).unshare()
 
     localrepo.poisonrepository(repo)
 
     return newrepo
 
 def postshare(sourcerepo, destrepo, defaultpath=None):
     """Called after a new shared repo is created.
 
     The new repo only has a requirements file and pointer to the source.
     This function configures additional shared data.
 
     Extensions can wrap this function and write additional entries to
     destrepo/.hg/shared to indicate additional pieces of data to be shared.
     """
     default = defaultpath or sourcerepo.ui.config('paths', 'default')
     if default:
         template = ('[paths]\n'
                     'default = %s\n')
         destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
+    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
+        with destrepo.wlock():
+            narrowspec.copytoworkingcopy(destrepo, None)
 
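The postshare() hunk above is the heart of the commit title: every new share
now gets its own working-copy copy of the narrowspec, which a share can later
compare against the shared store's copy to notice that a different share
changed it. A rough sketch of the detection idea (the comparison itself lives
outside this excerpt, and the 'narrowspec.dirstate' filename is an assumption
about what narrowspec.copytoworkingcopy() writes):

    # the store copy is shared by all shares; the vfs copy is per share
    storespec = repo.svfs.tryread('narrowspec')
    wcspec = repo.vfs.tryread('narrowspec.dirstate')  # assumed filename
    if storespec != wcspec:
        # another share changed the narrowspec; this working copy is stale
        ui.status(b"run 'hg tracked --update-working-copy' to catch up\n")
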
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from the store of srcrepo into destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic, unit=_('files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
                     defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None,
          storeincludepats=None, storeexcludepats=None, depth=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {'path:.'}

        createopts['narrowfiles'] = True

    if depth:
        createopts['shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts['lfs'] = True

        if extensions.disabledext('lfs'):
            ui.status(_('(remote is using large file support (lfs), but it is '
                        'explicitly disabled in the local configuration)\n'))
        else:
            ui.status(_('(remote is using large file support (lfs); lfs will '
                        'be enabled for this repository)\n'))

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_('(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
-                    with local.lock():
+                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats,
                                  depth=depth)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

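For reference, a minimal programmatic call of clone(), as a sketch; the URL
and destination are placeholders, and this is roughly what commands.clone()
ends up doing after option parsing:

from mercurial import hg, ui as uimod

ui = uimod.ui.load()
# full clone plus working-copy update; returns (source peer, destination peer)
# passing storeincludepats={'path:src'} instead would request a narrow clone
srcpeer, destpeer = hg.clone(ui, {}, 'https://example.com/repo',
                             dest='repo-copy', update=True)
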
def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, branchmerge=False, force=overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

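update() follows the 'hg update' exit-code convention: it returns True exactly
when unresolved merge conflicts remain. A small sketch of a caller:

# update to the default branch head and surface conflicts to the user
if hg.update(repo, repo.lookup('default')):
    repo.ui.warn('update left unresolved files behind\n')
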
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

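The updatecheck values listed in the docstring are the same ones accepted by
the commands.update.check config read at the top of the function, so the
default behavior of a bare 'hg update' can be tightened in hgrc:

[commands]
update.check = noconflict
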
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return True if there are
    any unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, branchmerge=True, force=force,
                                mergeforce=mergeforce, labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

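A short sketch of remoteui() in use, with illustrative option values: it
produces a ui that keeps global and connection-related settings (ssh command,
auth sections, CA certificates) but drops repo-specific configuration, which
is what peer construction wants:

from mercurial import hg

opts = {'ssh': 'ssh -C', 'remotecmd': None}  # as if parsed from --ssh/--remotecmd
rui = hg.remoteui(repo, opts)
# rui now answers config queries from the copied settings only
assert rui.config('ui', 'ssh') == 'ssh -C'
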
# Files of interest
# Used to check if the repository has changed by looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]

1153 class cachedlocalrepo(object):
1157 class cachedlocalrepo(object):
1154 """Holds a localrepository that can be cached and reused."""
1158 """Holds a localrepository that can be cached and reused."""
1155
1159
1156 def __init__(self, repo):
1160 def __init__(self, repo):
1157 """Create a new cached repo from an existing repo.
1161 """Create a new cached repo from an existing repo.
1158
1162
1159 We assume the passed in repo was recently created. If the
1163 We assume the passed in repo was recently created. If the
1160 repo has changed between when it was created and when it was
1164 repo has changed between when it was created and when it was
1161 turned into a cache, it may not refresh properly.
1165 turned into a cache, it may not refresh properly.
1162 """
1166 """
1163 assert isinstance(repo, localrepo.localrepository)
1167 assert isinstance(repo, localrepo.localrepository)
1164 self._repo = repo
1168 self._repo = repo
1165 self._state, self.mtime = self._repostate()
1169 self._state, self.mtime = self._repostate()
1166 self._filtername = repo.filtername
1170 self._filtername = repo.filtername
1167
1171
1168 def fetch(self):
1172 def fetch(self):
1169 """Refresh (if necessary) and return a repository.
1173 """Refresh (if necessary) and return a repository.
1170
1174
1171 If the cached instance is out of date, it will be recreated
1175 If the cached instance is out of date, it will be recreated
1172 automatically and returned.
1176 automatically and returned.
1173
1177
1174 Returns a tuple of the repo and a boolean indicating whether a new
1178 Returns a tuple of the repo and a boolean indicating whether a new
1175 repo instance was created.
1179 repo instance was created.
1176 """
1180 """
1177 # We compare the mtimes and sizes of some well-known files to
1181 # We compare the mtimes and sizes of some well-known files to
1178 # determine if the repo changed. This is not precise, as mtimes
1182 # determine if the repo changed. This is not precise, as mtimes
1179 # are susceptible to clock skew and imprecise filesystems and
1183 # are susceptible to clock skew and imprecise filesystems and
1180 # file content can change while maintaining the same size.
1184 # file content can change while maintaining the same size.
1181
1185
1182 state, mtime = self._repostate()
1186 state, mtime = self._repostate()
1183 if state == self._state:
1187 if state == self._state:
1184 return self._repo, False
1188 return self._repo, False
1185
1189
1186 repo = repository(self._repo.baseui, self._repo.url())
1190 repo = repository(self._repo.baseui, self._repo.url())
1187 if self._filtername:
1191 if self._filtername:
1188 self._repo = repo.filtered(self._filtername)
1192 self._repo = repo.filtered(self._filtername)
1189 else:
1193 else:
1190 self._repo = repo.unfiltered()
1194 self._repo = repo.unfiltered()
1191 self._state = state
1195 self._state = state
1192 self.mtime = mtime
1196 self.mtime = mtime
1193
1197
1194 return self._repo, True
1198 return self._repo, True
1195
1199
1196 def _repostate(self):
1200 def _repostate(self):
1197 state = []
1201 state = []
1198 maxmtime = -1
1202 maxmtime = -1
1199 for attr, fname in foi:
1203 for attr, fname in foi:
1200 prefix = getattr(self._repo, attr)
1204 prefix = getattr(self._repo, attr)
1201 p = os.path.join(prefix, fname)
1205 p = os.path.join(prefix, fname)
1202 try:
1206 try:
1203 st = os.stat(p)
1207 st = os.stat(p)
1204 except OSError:
1208 except OSError:
1205 st = os.stat(prefix)
1209 st = os.stat(prefix)
1206 state.append((st[stat.ST_MTIME], st.st_size))
1210 state.append((st[stat.ST_MTIME], st.st_size))
1207 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1211 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1208
1212
1209 return tuple(state), maxmtime
1213 return tuple(state), maxmtime
1210
1214
1211 def copy(self):
1215 def copy(self):
1212 """Obtain a copy of this class instance.
1216 """Obtain a copy of this class instance.
1213
1217
1214 A new localrepository instance is obtained. The new instance should be
1218 A new localrepository instance is obtained. The new instance should be
1215 completely independent of the original.
1219 completely independent of the original.
1216 """
1220 """
1217 repo = repository(self._repo.baseui, self._repo.origroot)
1221 repo = repository(self._repo.baseui, self._repo.origroot)
1218 if self._filtername:
1222 if self._filtername:
1219 repo = repo.filtered(self._filtername)
1223 repo = repo.filtered(self._filtername)
1220 else:
1224 else:
1221 repo = repo.unfiltered()
1225 repo = repo.unfiltered()
1222 c = cachedlocalrepo(repo)
1226 c = cachedlocalrepo(repo)
1223 c._state = self._state
1227 c._state = self._state
1224 c.mtime = self.mtime
1228 c.mtime = self.mtime
1225 return c
1229 return c
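
# Editor's note: illustrative sketch, not part of this changeset. It shows how
# a long-lived process (e.g. a web server) might use cachedlocalrepo; the
# repository path below is hypothetical.
def _cachedrepoexample(baseui, path=b'/path/to/repo'):
    cached = cachedlocalrepo(repository(baseui, path))
    repo, fresh = cached.fetch()  # cheap when the files of interest are unchanged
    if fresh:
        pass  # caches keyed on the previous instance must be rebuilt here
    return repo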
@@ -1,3077 +1,3085 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repos is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

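# Editor's note: illustrative sketch, not part of this changeset. It shows how
# isfilecached() lets a caller peek at a filecache-ed property ('changelog'
# here) without forcing an expensive load.
def _isfilecachedexample(repo):
    cl, cached = isfilecached(repo, 'changelog')
    if cached:
        return len(cl)  # reuse the already-loaded changelog
    return None  # not loaded; accessing repo.changelog would load it
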
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

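# Editor's note: illustrative sketch, not part of this changeset. It drives a
# peer through the executor interface above; for a local peer the returned
# future is already resolved when callcommand() returns.
def _executorexample(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
    return f.result()
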
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')


    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

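# Editor's note: illustrative sketch, not part of this changeset; the path is
# hypothetical. It constructs a repository via the factory above.
def _makelocalrepositoryexample(baseui):
    repo = makelocalrepository(baseui, b'/path/to/repo')
    # The derived type's name encodes the path and requirements, e.g.
    # "derivedrepo:/path/to/repo<dotencode,fncache,revlogv1,store>".
    return type(repo).__name__
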
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

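# Editor's note: illustrative example, not part of this changeset. The
# requirement name b'frobnicate' is made up:
#
#     ensurerequirementsrecognized({b'revlogv1', b'frobnicate'},
#                                  {b'revlogv1', b'store', b'fncache'})
#
# raises error.RequirementError naming 'frobnicate' and hinting at the
# MissingRequirement wiki page.
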
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

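# Editor's note: illustrative summary, not part of this changeset. The
# requirement combinations makestore() dispatches on:
#
#     {b'store', b'fncache', b'dotencode'} -> fncachestore (dotencode=True)
#     {b'store', b'fncache'}               -> fncachestore (dotencode=False)
#     {b'store'}                           -> encodedstore
#     set() (very old repos)               -> basicstore
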
707 def resolvestorevfsoptions(ui, requirements, features):
707 def resolvestorevfsoptions(ui, requirements, features):
708 """Resolve the options to pass to the store vfs opener.
708 """Resolve the options to pass to the store vfs opener.
709
709
710 The returned dict is used to influence behavior of the storage layer.
710 The returned dict is used to influence behavior of the storage layer.
711 """
711 """
712 options = {}
712 options = {}
713
713
714 if b'treemanifest' in requirements:
714 if b'treemanifest' in requirements:
715 options[b'treemanifest'] = True
715 options[b'treemanifest'] = True
716
716
717 # experimental config: format.manifestcachesize
717 # experimental config: format.manifestcachesize
718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
719 if manifestcachesize is not None:
719 if manifestcachesize is not None:
720 options[b'manifestcachesize'] = manifestcachesize
720 options[b'manifestcachesize'] = manifestcachesize
721
721
722 # In the absence of another requirement superseding a revlog-related
722 # In the absence of another requirement superseding a revlog-related
723 # requirement, we have to assume the repo is using revlog version 0.
723 # requirement, we have to assume the repo is using revlog version 0.
724 # This revlog format is super old and we don't bother trying to parse
724 # This revlog format is super old and we don't bother trying to parse
725 # opener options for it because those options wouldn't do anything
725 # opener options for it because those options wouldn't do anything
726 # meaningful on such old repos.
726 # meaningful on such old repos.
727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
729
729
730 return options
730 return options
731
731
732 def resolverevlogstorevfsoptions(ui, requirements, features):
732 def resolverevlogstorevfsoptions(ui, requirements, features):
733 """Resolve opener options specific to revlogs."""
733 """Resolve opener options specific to revlogs."""
734
734
735 options = {}
735 options = {}
736 options[b'flagprocessors'] = {}
736 options[b'flagprocessors'] = {}
737
737
738 if b'revlogv1' in requirements:
738 if b'revlogv1' in requirements:
739 options[b'revlogv1'] = True
739 options[b'revlogv1'] = True
740 if REVLOGV2_REQUIREMENT in requirements:
740 if REVLOGV2_REQUIREMENT in requirements:
741 options[b'revlogv2'] = True
741 options[b'revlogv2'] = True
742
742
743 if b'generaldelta' in requirements:
743 if b'generaldelta' in requirements:
744 options[b'generaldelta'] = True
744 options[b'generaldelta'] = True
745
745
746 # experimental config: format.chunkcachesize
746 # experimental config: format.chunkcachesize
747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
748 if chunkcachesize is not None:
748 if chunkcachesize is not None:
749 options[b'chunkcachesize'] = chunkcachesize
749 options[b'chunkcachesize'] = chunkcachesize
750
750
751 deltabothparents = ui.configbool(b'storage',
751 deltabothparents = ui.configbool(b'storage',
752 b'revlog.optimize-delta-parent-choice')
752 b'revlog.optimize-delta-parent-choice')
753 options[b'deltabothparents'] = deltabothparents
753 options[b'deltabothparents'] = deltabothparents
754
754
755 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
755 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
756
756
757 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
757 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
758 if 0 <= chainspan:
758 if 0 <= chainspan:
759 options[b'maxdeltachainspan'] = chainspan
759 options[b'maxdeltachainspan'] = chainspan
760
760
761 mmapindexthreshold = ui.configbytes(b'storage', b'mmap-threshold')
761 mmapindexthreshold = ui.configbytes(b'storage', b'mmap-threshold')
762 if mmapindexthreshold is not None:
762 if mmapindexthreshold is not None:
763 options[b'mmapindexthreshold'] = mmapindexthreshold
763 options[b'mmapindexthreshold'] = mmapindexthreshold
764
764
765 withsparseread = ui.configbool(b'experimental', b'sparse-read')
765 withsparseread = ui.configbool(b'experimental', b'sparse-read')
766 srdensitythres = float(ui.config(b'experimental',
766 srdensitythres = float(ui.config(b'experimental',
767 b'sparse-read.density-threshold'))
767 b'sparse-read.density-threshold'))
768 srmingapsize = ui.configbytes(b'experimental',
768 srmingapsize = ui.configbytes(b'experimental',
769 b'sparse-read.min-gap-size')
769 b'sparse-read.min-gap-size')
770 options[b'with-sparse-read'] = withsparseread
770 options[b'with-sparse-read'] = withsparseread
771 options[b'sparse-read-density-threshold'] = srdensitythres
771 options[b'sparse-read-density-threshold'] = srdensitythres
772 options[b'sparse-read-min-gap-size'] = srmingapsize
772 options[b'sparse-read-min-gap-size'] = srmingapsize
773
773
774 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
774 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
775 options[b'sparse-revlog'] = sparserevlog
775 options[b'sparse-revlog'] = sparserevlog
776 if sparserevlog:
776 if sparserevlog:
777 options[b'generaldelta'] = True
777 options[b'generaldelta'] = True
778
778
779 maxchainlen = None
779 maxchainlen = None
780 if sparserevlog:
780 if sparserevlog:
781 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
781 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
782 # experimental config: format.maxchainlen
782 # experimental config: format.maxchainlen
783 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
783 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
784 if maxchainlen is not None:
784 if maxchainlen is not None:
785 options[b'maxchainlen'] = maxchainlen
785 options[b'maxchainlen'] = maxchainlen
786
786
787 for r in requirements:
787 for r in requirements:
788 if r.startswith(b'exp-compression-'):
788 if r.startswith(b'exp-compression-'):
789 options[b'compengine'] = r[len(b'exp-compression-'):]
789 options[b'compengine'] = r[len(b'exp-compression-'):]
790
790
791 if repository.NARROW_REQUIREMENT in requirements:
791 if repository.NARROW_REQUIREMENT in requirements:
792 options[b'enableellipsis'] = True
792 options[b'enableellipsis'] = True
793
793
794 return options
794 return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
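
# A minimal sketch (an editorial addition, not upstream code) of how the
# factory above picks a storage class; the set literals are made-up inputs:
#
#   features = set()
#   cls = makefilestorage({repository.NARROW_REQUIREMENT}, features)
#   # cls is revlognarrowfilestorage; without the narrow requirement it
#   # would be revlogfilestorage, and ``features`` is updated in place.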

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
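
    # Illustrative note (an editorial addition, not upstream code): the
    # available filter names live in repoview.filtertable, e.g.
    #
    #   visible = repo.filtered('visible')  # hides obsolete/hidden changesets
    #   served = repo.filtered('served')    # the view a server would expose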

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self.narrowmatch())

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match._root, match._cwd, match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
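
    # A small sketch (an editorial addition; the pattern is a made-up
    # example) of composing a caller-supplied matcher with the narrowspec:
    #
    #   m = matchmod.match(repo.root, '', ['glob:src/**'])
    #   narrowed = repo.narrowmatch(m)  # m intersected with the narrowspec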

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        narrowspec.copytoworkingcopy(self, self.currenttransaction())
        self.invalidate(clearfilecache=True)
        # So the next access won't be considered a conflict
        # TODO: It seems like there should be a way of doing this that
        # doesn't involve replacing these attributes.
        self.narrowpats = newincludes, newexcludes
        self._narrowmatch = narrowspec.match(self.root, include=newincludes,
                                             exclude=newexcludes)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
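
    # Hedged examples (an editorial addition) of the changeid forms accepted
    # above, assuming ``repo`` is an instance of this class:
    #
    #   repo[None]    # working directory context
    #   repo['.']     # first parent of the working directory
    #   repo['tip']   # tip changeset
    #   repo[0]       # revision number 0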

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
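
    # Hedged examples (an editorial addition): ``expr`` supports the
    # %-formatting documented in ``revsetlang.formatspec``, e.g.
    #
    #   repo.revs('not public()')
    #   repo.revs('ancestors(%d)', 42)  # %d escapes an integer revision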

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
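
    # Hedged example (an editorial addition; the alias name is made up) of a
    # local definition overriding any user alias of the same name:
    #
    #   revs = repo.anyrevs(['t'], user=True, localalias={'t': 'tip'})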

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
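
    # Illustrative usage (an editorial addition), assuming a branch named
    # 'default' exists:
    #
    #   heads = repo.branchmap()['default']  # head nodes, oldest rev first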

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
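
    # Hedged configuration sketch (an editorial addition): the patterns
    # loaded above come from hgrc sections of the same name, for example
    #
    #   [encode]
    #   *.gz = gunzip
    #
    # where the value is run as a shell command unless it starts with the
    # name of a registered data filter.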

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
1646
1654
1647 @unfilteredpropertycache
1655 @unfilteredpropertycache
1648 def _encodefilterpats(self):
1656 def _encodefilterpats(self):
1649 return self._loadfilter('encode')
1657 return self._loadfilter('encode')
1650
1658
1651 @unfilteredpropertycache
1659 @unfilteredpropertycache
1652 def _decodefilterpats(self):
1660 def _decodefilterpats(self):
1653 return self._loadfilter('decode')
1661 return self._loadfilter('decode')
1654
1662
1655 def adddatafilter(self, name, filter):
1663 def adddatafilter(self, name, filter):
1656 self._datafilters[name] = filter
1664 self._datafilters[name] = filter
1657
1665
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

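    # Note (editorial, not in upstream localrepo.py): the ``flags`` strings
    # handled by wwrite() above are the manifest flags -- 'l' marks a symlink
    # and 'x' an executable file; an empty string means a plain regular file.
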
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

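    # Illustrative only (not part of upstream localrepo.py): a typical caller
    # takes the appropriate locks before opening a transaction, e.g.
    #
    #   with repo.wlock(), repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # mutate the store; closing runs the pretxnclose and
    #                # txnclose hooks wired up above
    #
    # 'my-operation' is a hypothetical transaction name; see lock() and
    # wlock() below for the required lock ordering.
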
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

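    # Note (editorial, not in upstream localrepo.py): aftertrans(), via the
    # 'renames' list built in transaction() above, renames each journal.*
    # file written here to its undo.* counterpart once the transaction
    # commits -- e.g. 'journal.dirstate' becomes 'undo.dirstate' -- which is
    # what _rollback() below reads back.
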
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

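    # Note (editorial, not in upstream localrepo.py): the weakref in
    # _buildcacheupdater() mirrors the 'reporef' pattern in transaction()
    # above -- the transaction object stores the updater callback, so a
    # strong reference to 'self' inside that closure would create a
    # repo <-> transaction reference cycle.
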
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisitions. Such
        # acquisitions would not cause a dead-lock, as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

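    # Illustrative only (not part of upstream localrepo.py): the ordering
    # rule from the docstrings above, in code form --
    #
    #   with repo.wlock():      # working-copy lock first
    #       with repo.lock():   # store lock second
    #           ...
    #
    # Taking them in the opposite order is exactly what the devel warning in
    # wlock() ('"wlock" acquired after "lock"') is there to catch.
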
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

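    # Illustrative only (not part of upstream localrepo.py): for a commit
    # that renames 'foo' to 'bar', _filecommit() above would store something
    # along the lines of
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node of foo>'}
    #
    # in the filelog entry for 'bar', with fparent1 set to nullid so that
    # readers know to look up the copy data (see the diagram above).
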
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

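    # Illustrative usage sketch (hypothetical caller, not from the original
    # source): commit() acquires its own wlock/lock, so a caller can simply
    # do
    #
    #   node = repo.commit(text="fix bug", user="alice <a@example.com>")
    #
    # and gets back the new changeset node, or None when nothing changed and
    # ui.allowemptycommit is unset.
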
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

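    # Illustrative sketch (hypothetical extension code, not from the original
    # source): registering a post-dirstate-status callback.
    #
    #   def fixup(wctx, status):
    #       # runs under wlock when status fixups happen
    #       wctx.repo().ui.debug('%d modified\n' % len(status.modified))
    #
    #   repo.addpostdsstatus(fixup)
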
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

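    # Illustrative note (not from the original source): between() samples the
    # chain from top to bottom at exponentially growing distances. For a
    # linear history r0..r10 with top=r10 and bottom=r0, the appended nodes
    # lie at distances 1, 2, 4 and 8 from top, i.e. [r9, r8, r6, r2]; the
    # legacy discovery protocol used this for a binary-search style walk.
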
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of the hooks called before pushing
        changesets; each hook is passed a pushop with repo, remote and
        outgoing attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

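    # Illustrative note (not from the original source): the message is saved
    # to .hg/last-message.txt and the return value is that path relative to
    # the cwd, which is what commit() prints via its
    # _('note: commit message saved in %s\n') hint when a transaction is
    # rolled back.
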
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

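# Illustrative examples (not from the original source):
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
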
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements

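# Illustrative note (not from the original source): with stock settings the
# returned set is typically
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
#
# and is what scmutil.writerequires() later persists to .hg/requires.
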
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

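# Illustrative sketch (hypothetical caller, not from the original source):
#
#   from mercurial import ui as uimod
#   createrepository(uimod.ui.load(), b'/tmp/newrepo',
#                    createopts={'lfs': True})
#
# creates /tmp/newrepo/.hg with 'lfs' included in its requires file.
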
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
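
# Illustrative note (not from the original source): after
# poisonrepository(repo), repo.close() is still allowed, while any other
# attribute access, e.g. repo.changelog, raises
# error.ProgrammingError('repo instances should not be used after unshare').
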
@@ -1,228 +1,298 b''
# narrowspec.py - methods for working with a narrow view of a repository
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    error,
    match as matchmod,
    merge,
    repository,
    sparse,
    util,
)

# The file in .hg/store/ that indicates which paths exist in the store
FILENAME = 'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
DIRSTATE_FILENAME = 'narrowspec.dirstate'

# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
# that patterns are supplied by clients and executed on remote servers
# as part of wire protocol commands. That means that changes to this
# data structure influence the wire protocol and should not be taken
# lightly - especially removals.
VALID_PREFIXES = (
    b'path:',
    b'rootfilesin:',
)

def normalizesplitpattern(kind, pat):
    """Returns the normalized version of a pattern and kind.

    Returns a tuple with the normalized kind and normalized pattern.
    """
    pat = pat.rstrip('/')
    _validatepattern(pat)
    return kind, pat

def _numlines(s):
    """Returns the number of lines in s, including ending empty lines."""
    # We use splitlines because it is Unicode-friendly and thus Python 3
    # compatible. However, it does not count empty lines at the end, so trick
    # it by adding a character at the end.
    return len((s + 'x').splitlines())

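# Illustrative examples (not from the original source):
#
#   _numlines('a\nb')   -> 2
#   _numlines('a\nb\n') -> 3    # the trailing empty line is counted
#
# whereas len('a\nb\n'.splitlines()) would return 2, hence the trick above.
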
def _validatepattern(pat):
    """Validates the pattern and aborts if it is invalid.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths. There's no escaping.
    """

    # We use newlines as separators in the narrowspec file, so don't allow them
    # in patterns.
    if _numlines(pat) > 1:
        raise error.Abort(_('newlines are not allowed in narrowspec paths'))

    components = pat.split('/')
    if '.' in components or '..' in components:
        raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))

def normalizepattern(pattern, defaultkind='path'):
    """Returns the normalized version of a text-format pattern.

    If the pattern has no kind, the default will be added.
    """
    kind, pat = matchmod._patsplit(pattern, defaultkind)
    return '%s:%s' % normalizesplitpattern(kind, pat)

def parsepatterns(pats):
    """Parses an iterable of patterns into a typed pattern set.

    Patterns are assumed to be ``path:`` if no prefix is present.
    For safety and performance reasons, only some prefixes are allowed.
    See ``validatepatterns()``.

    This function should be used on patterns that come from the user to
    normalize and validate them to the internal data structure used for
    representing patterns.
    """
    res = {normalizepattern(orig) for orig in pats}
    validatepatterns(res)
    return res

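# Illustrative examples (not from the original source):
#
#   normalizepattern('foo/bar/')              -> 'path:foo/bar'
#   normalizepattern('rootfilesin:foo/bar')   -> 'rootfilesin:foo/bar'
#   parsepatterns(['foo', 'rootfilesin:bar']) -> {'path:foo',
#                                                 'rootfilesin:bar'}
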
def validatepatterns(pats):
    """Validate that patterns are in the expected data structure and format.

    And that is a set of normalized patterns beginning with ``path:`` or
    ``rootfilesin:``.

    This function should be used to validate internal data structures
    and patterns that are loaded from sources that use the internal,
    prefixed pattern representation (but can't necessarily be fully trusted).
    """
    if not isinstance(pats, set):
        raise error.ProgrammingError('narrow patterns should be a set; '
                                     'got %r' % pats)

    for pat in pats:
        if not pat.startswith(VALID_PREFIXES):
            # Use a Mercurial exception because this can happen due to user
            # bugs (e.g. manually updating spec file).
            raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
                              hint=_('narrow patterns must begin with one of '
                                     'the following: %s') %
                                   ', '.join(VALID_PREFIXES))

def format(includes, excludes):
    output = '[include]\n'
    for i in sorted(includes - excludes):
        output += i + '\n'
    output += '[exclude]\n'
    for e in sorted(excludes):
        output += e + '\n'
    return output

def match(root, include=None, exclude=None):
    if not include:
        # Passing empty include and empty exclude to matchmod.match()
        # gives a matcher that matches everything, so explicitly use
        # the nevermatcher.
        return matchmod.never(root, '')
    return matchmod.match(root, '', [], include=include or [],
                          exclude=exclude or [])

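# Illustrative example (not from the original source): format() with
# includes {'path:src'} and excludes {'path:src/generated'} produces
#
#   [include]
#   path:src
#   [exclude]
#   path:src/generated
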
130 def parseconfig(ui, spec):
134 def parseconfig(ui, spec):
131 # maybe we should care about the profiles returned too
135 # maybe we should care about the profiles returned too
132 includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
136 includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
133 if profiles:
137 if profiles:
134 raise error.Abort(_("including other spec files using '%include' is not"
138 raise error.Abort(_("including other spec files using '%include' is not"
135 " supported in narrowspec"))
139 " supported in narrowspec"))
136
140
137 validatepatterns(includepats)
141 validatepatterns(includepats)
138 validatepatterns(excludepats)
142 validatepatterns(excludepats)
139
143
140 return includepats, excludepats
144 return includepats, excludepats
141
145
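To make the serialized format concrete, a hedged round-trip sketch (invented
pattern values; a bare ui instance stands in for a real one, and string/bytes
details are glossed over):

    from mercurial import narrowspec, ui as uimod

    spec = narrowspec.format({'path:src', 'path:docs'}, {'path:src/vendor'})
    # spec == '[include]\npath:docs\npath:src\n[exclude]\npath:src/vendor\n'

    includes, excludes = narrowspec.parseconfig(uimod.ui(), spec)
    # -> ({'path:docs', 'path:src'}, {'path:src/vendor'})
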
 def load(repo):
     try:
         spec = repo.svfs.read(FILENAME)
     except IOError as e:
         # Treat "narrowspec does not exist" the same as "narrowspec file exists
         # and is empty".
         if e.errno == errno.ENOENT:
             return set(), set()
         raise

     return parseconfig(repo.ui, spec)

 def save(repo, includepats, excludepats):
     validatepatterns(includepats)
     validatepatterns(excludepats)
     spec = format(includepats, excludepats)
     repo.svfs.write(FILENAME, spec)

+def copytoworkingcopy(repo, tr):
+    if tr:
+        def write(file):
+            spec = repo.svfs.read(FILENAME)
+            file.write(spec)
+            file.close()
+        tr.addfilegenerator('narrowspec', (DIRSTATE_FILENAME,), write,
+                            location='plain')
+    else:
+        spec = repo.svfs.read(FILENAME)
+        repo.vfs.write(DIRSTATE_FILENAME, spec)
+
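The new copytoworkingcopy() above is the heart of this change: the authoritative
narrowspec stays in the store (shared by all 'hg share' clones), and each working
copy keeps its own snapshot next to the dirstate. A hedged sketch of the intended
calling pattern (simplified; real callers hold the wlock and run in a transaction):

    with repo.wlock(), repo.lock(), repo.transaction('tracked') as tr:
        narrowspec.save(repo, newincludes, newexcludes)  # store copy
        narrowspec.copytoworkingcopy(repo, tr)           # this clone's snapshot
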
 def savebackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     svfs = repo.svfs
     svfs.tryunlink(backupname)
     util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)

 def restorebackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))

 def clearbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     repo.svfs.unlink(backupname)

 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
     r""" Restricts the patterns according to repo settings,
     results in a logical AND operation

     :param req_includes: requested includes
     :param req_excludes: requested excludes
     :param repo_includes: repo includes
     :param repo_excludes: repo excludes
     :return: include patterns, exclude patterns, and invalid include patterns.

     >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
     (set(['f1']), {}, [])
     >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
     (set(['f1']), {}, [])
     >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
     (set(['f1/fc1']), {}, [])
     >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
     ([], set(['path:.']), [])
     >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
     (set(['f2/fc2']), {}, [])
     >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
     ([], set(['path:.']), [])
     >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], [])
     (set(['f1/$non_exitent_var']), {}, [])
     """
     res_excludes = set(req_excludes)
     res_excludes.update(repo_excludes)
     invalid_includes = []
     if not req_includes:
         res_includes = set(repo_includes)
     elif 'path:.' not in repo_includes:
         res_includes = []
         for req_include in req_includes:
             req_include = util.expandpath(util.normpath(req_include))
             if req_include in repo_includes:
                 res_includes.append(req_include)
                 continue
             valid = False
             for repo_include in repo_includes:
                 if req_include.startswith(repo_include + '/'):
                     valid = True
                     res_includes.append(req_include)
                     break
             if not valid:
                 invalid_includes.append(req_include)
         if len(res_includes) == 0:
             res_excludes = {'path:.'}
         else:
             res_includes = set(res_includes)
     else:
         res_includes = set(req_includes)
     return res_includes, res_excludes, invalid_includes
+
+# These two are extracted for extensions (specifically for Google's CitC file
+# system)
+def _deletecleanfiles(repo, files):
+    for f in files:
+        repo.wvfs.unlinkpath(f)
+
+def _writeaddedfiles(repo, pctx, files):
+    actions = merge.emptyactions()
+    addgaction = actions['g'].append
+    mf = repo['.'].manifest()
+    for f in files:
+        if not repo.wvfs.exists(f):
+            addgaction((f, (mf.flags(f), False), "narrowspec updated"))
+    merge.applyupdates(repo, actions, wctx=repo[None],
+                       mctx=repo['.'], overwrite=False)
+
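Per the comment above, _deletecleanfiles() and _writeaddedfiles() are module-level
so extensions can replace them. A hedged sketch of wrapping one from a third-party
extension (the extension body is invented; wrapfunction is the standard hook API):

    from mercurial import extensions, narrowspec

    def _quietdeletecleanfiles(orig, repo, files):
        # e.g. a virtual file system could drop files without touching disk
        repo.ui.debug('narrow: dropping %d clean files\n' % len(files))
        return orig(repo, files)

    def extsetup(ui):
        extensions.wrapfunction(narrowspec, '_deletecleanfiles',
                                _quietdeletecleanfiles)
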
+def checkworkingcopynarrowspec(repo):
+    storespec = repo.svfs.tryread(FILENAME)
+    wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
+    if wcspec != storespec:
+        raise error.Abort(_("working copy's narrowspec is stale"),
+                          hint=_("run 'hg tracked --update-working-copy'"))
+
+def updateworkingcopy(repo, tr):
+    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
+    newspec = repo.svfs.tryread(FILENAME)
+
+    oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
+    newincludes, newexcludes = parseconfig(repo.ui, newspec)
+    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
+    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
+    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
+    removedmatch = matchmod.differencematcher(oldmatch, newmatch)
+
+    ds = repo.dirstate
+    lookup, status = ds.status(removedmatch, subrepos=[], ignored=False,
+                               clean=True, unknown=False)
+    _deletecleanfiles(repo, status.clean)
+    trackeddirty = lookup + status.modified + status.added
+    for f in sorted(trackeddirty):
+        repo.ui.status(_('not deleting possibly dirty file %s\n') % f)
+    for f in status.clean + trackeddirty:
+        ds.drop(f)
+
+    repo.narrowpats = newincludes, newexcludes
+    repo._narrowmatch = newmatch
+    pctx = repo['.']
+    newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
+    for f in newfiles:
+        ds.normallookup(f)
+    _writeaddedfiles(repo, pctx, newfiles)
+
+    ds.write(tr)
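Taken together, the flow for detecting and repairing a narrowspec changed through
another share looks roughly like this (a sketch; the real command wiring lives
elsewhere in this commit):

    # Any command touching the working copy first verifies the snapshot:
    narrowspec.checkworkingcopynarrowspec(repo)  # raises Abort if stale

    # The user reconciles explicitly with 'hg tracked --update-working-copy',
    # which boils down to something like:
    with repo.wlock(), repo.lock(), repo.transaction('narrow-wc') as tr:
        narrowspec.updateworkingcopy(repo, tr)   # add/remove files, fix dirstate
        narrowspec.copytoworkingcopy(repo, tr)   # refresh the snapshot
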
@@ -1,43 +1,44 @@
   $ . "$TESTDIR/narrow-library.sh"
   $ hg init repo
   $ cd repo
   $ cat << EOF > .hg/store/narrowspec
   > [include]
   > path:foo
   > [exclude]
   > EOF
+  $ cp .hg/store/narrowspec .hg/narrowspec.dirstate
   $ echo treemanifest >> .hg/requires
   $ echo narrowhg-experimental >> .hg/requires
   $ mkdir -p foo/bar
   $ echo b > foo/f
   $ echo c > foo/bar/f
   $ hg commit -Am hi
   adding foo/bar/f
   adding foo/f
   $ hg debugindex -m
      rev linkrev nodeid       p1           p2
        0       0 14a5d056d75a 000000000000 000000000000
   $ hg debugindex --dir foo
      rev linkrev nodeid       p1           p2
        0       0 e635c7857aef 000000000000 000000000000
   $ hg debugindex --dir foo/
      rev linkrev nodeid       p1           p2
        0       0 e635c7857aef 000000000000 000000000000
   $ hg debugindex --dir foo/bar
      rev linkrev nodeid       p1           p2
        0       0 e091d4224761 000000000000 000000000000
   $ hg debugindex --dir foo/bar/
      rev linkrev nodeid       p1           p2
        0       0 e091d4224761 000000000000 000000000000
   $ hg debugdata -m 0
   foo\x00e635c7857aef92ac761ce5741a99da159abbbb24t (esc)
   $ hg debugdata --dir foo 0
   bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
   f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
   $ hg debugdata --dir foo/ 0
   bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
   f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
   $ hg debugdata --dir foo/bar 0
   f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
   $ hg debugdata --dir foo/bar/ 0
   f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)

@@ -1,132 +1,170 @@
 #testcases flat tree

   $ . "$TESTDIR/narrow-library.sh"

 #if tree
   $ cat << EOF >> $HGRCPATH
   > [experimental]
   > treemanifest = 1
   > EOF
 #endif

   $ cat << EOF >> $HGRCPATH
   > [extensions]
   > share =
   > EOF

   $ hg init remote
   $ cd remote
   $ for x in `$TESTDIR/seq.py 0 10`
   > do
   >   mkdir d$x
   >   echo $x > d$x/f
   >   hg add d$x/f
   >   hg commit -m "add d$x/f"
   > done
   $ cd ..

   $ hg clone --narrow ssh://user@dummy/remote main -q \
   > --include d1 --include d3 --include d5 --include d7

   $ hg share main share
   updating working directory
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R share tracked
   I path:d1
   I path:d3
   I path:d5
   I path:d7
   $ hg -R share files
   share/d1/f
   share/d3/f
   share/d5/f
   share/d7/f

 Narrow the share and check that the main repo's working copy gets updated

 # Make d3/f dirty
   $ echo x >> main/d3/f
   $ echo y >> main/d3/g
   $ hg add main/d3/g
   $ hg -R main st
   M d3/f
   A d3/g
 # Make d5/f not match the dirstate timestamp even though it's clean
   $ sleep 2
   $ hg -R main st
   M d3/f
   A d3/g
   $ hg -R main debugdirstate --no-dates
   n 644 2 set d1/f
   n 644 2 unset d3/f
   a 0 -1 unset d3/g
   n 644 2 set d5/f
   n 644 2 set d7/f
   $ touch main/d5/f
   $ hg -R share tracked --removeinclude d1 --removeinclude d3 --removeinclude d5
   comparing with ssh://user@dummy/remote
   searching for changes
   looking for local changes to affected paths
   deleting data/d1/f.i
   deleting data/d3/f.i
   deleting data/d5/f.i
   deleting meta/d1/00manifest.i (tree !)
   deleting meta/d3/00manifest.i (tree !)
   deleting meta/d5/00manifest.i (tree !)
   $ hg -R main tracked
   I path:d7
+  $ hg -R main files
+  abort: working copy's narrowspec is stale
+  (run 'hg tracked --update-working-copy')
+  [255]
+  $ hg -R main tracked --update-working-copy
+  not deleting possibly dirty file d3/f
+  not deleting possibly dirty file d3/g
+  not deleting possibly dirty file d5/f
 # d1/f, d3/f, d3/g and d5/f should no longer be reported
   $ hg -R main files
   main/d7/f
 # d1/f should no longer be there, d3/f should be since it was dirty, d3/g should be there since
 # it was added, and d5/f should be since we couldn't be sure it was clean
   $ find main/d* -type f | sort
-  main/d1/f
   main/d3/f
   main/d3/g
   main/d5/f
   main/d7/f

 Widen the share and check that the main repo's working copy gets updated

   $ hg -R share tracked --addinclude d1 --addinclude d3 -q
   $ hg -R share tracked
   I path:d1
   I path:d3
   I path:d7
   $ hg -R share files
   share/d1/f
   share/d3/f
   share/d7/f
   $ hg -R main tracked
   I path:d1
   I path:d3
   I path:d7
+  $ hg -R main files
+  abort: working copy's narrowspec is stale
+  (run 'hg tracked --update-working-copy')
+  [255]
+  $ hg -R main tracked --update-working-copy
 # d1/f, d3/f should be back
   $ hg -R main files
   main/d1/f
   main/d3/f
-  main/d3/g
   main/d7/f
 # d3/f should be modified (not clobbered by the widening), and d3/g should be untracked
   $ hg -R main st --all
   M d3/f
-  A d3/g
+  ? d3/g
   C d1/f
   C d7/f

 We should also be able to unshare without breaking everything:

   $ hg share main share-unshare
   updating working directory
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd share-unshare
   $ hg unshare
   $ hg verify
   checking changesets
   checking manifests
   checking directory manifests (tree !)
   crosschecking files in changesets and manifests
   checking files
   checked 11 changesets with 3 changes to 3 files
   $ cd ..
+
+Dirstate should be left alone when upgrading from a version of hg that didn't support narrow+share
+
+  $ hg share main share-upgrade
+  updating working directory
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd share-upgrade
+  $ echo x >> d1/f
+  $ echo y >> d3/g
+  $ hg add d3/g
+  $ hg rm d7/f
+  $ hg st
+  M d1/f
+  A d3/g
+  R d7/f
+Make it look like a repo from before narrow+share was supported
+  $ rm .hg/narrowspec.dirstate
+  $ hg st
+  abort: working copy's narrowspec is stale
+  (run 'hg tracked --update-working-copy')
+  [255]
+  $ hg tracked --update-working-copy
+  $ hg st
+  M d1/f
+  A d3/g
+  R d7/f
+  $ cd ..
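
What "stale" means in the tests above is a plain byte comparison of the two
copies of the spec; illustratively (file names as shown in the tests; tryread()
returns an empty string for a missing file):

    storespec = repo.svfs.tryread('narrowspec')        # .hg/store/narrowspec
    wcspec = repo.vfs.tryread('narrowspec.dirstate')   # .hg/narrowspec.dirstate
    stale = wcspec != storespec
    # This is why deleting .hg/narrowspec.dirstate in the upgrade scenario
    # makes 'hg st' abort until --update-working-copy recreates it.
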
@@ -1,218 +1,220 @@
 #testcases flat tree
   $ . "$TESTDIR/narrow-library.sh"

 #if tree
   $ cat << EOF >> $HGRCPATH
   > [experimental]
   > treemanifest = 1
   > EOF
 #endif

   $ hg init master
   $ cd master
   $ cat >> .hg/hgrc <<EOF
   > [narrow]
   > serveellipses=True
   > EOF

   $ mkdir inside
   $ echo 'inside' > inside/f
   $ hg add inside/f
   $ hg commit -m 'add inside'

   $ mkdir widest
   $ echo 'widest' > widest/f
   $ hg add widest/f
   $ hg commit -m 'add widest'

   $ mkdir outside
   $ echo 'outside' > outside/f
   $ hg add outside/f
   $ hg commit -m 'add outside'

   $ cd ..

 narrow clone the inside file

   $ hg clone --narrow ssh://user@dummy/master narrow --include inside
   requesting all changes
   adding changesets
   adding manifests
   adding file changes
   added 2 changesets with 1 changes to 1 files
   new changesets *:* (glob)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd narrow
   $ hg tracked
   I path:inside
   $ ls
   inside
   $ cat inside/f
   inside
   $ cd ..

 add more upstream files which we will include in a wider narrow spec

   $ cd master

   $ mkdir wider
   $ echo 'wider' > wider/f
   $ hg add wider/f
   $ echo 'widest v2' > widest/f
   $ hg commit -m 'add wider, update widest'

   $ echo 'widest v3' > widest/f
   $ hg commit -m 'update widest v3'

   $ echo 'inside v2' > inside/f
   $ hg commit -m 'update inside'

   $ mkdir outside2
   $ echo 'outside2' > outside2/f
   $ hg add outside2/f
   $ hg commit -m 'add outside2'

   $ echo 'widest v4' > widest/f
   $ hg commit -m 'update widest v4'

   $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
   7: update widest v4
   6: add outside2
   5: update inside
   4: update widest v3
   3: add wider, update widest
   2: add outside
   1: add widest
   0: add inside

   $ cd ..

 Testing the --import-rules flag of `hg tracked` command

   $ cd narrow
   $ hg tracked --import-rules
   hg tracked: option --import-rules requires argument
   hg tracked [OPTIONS]... [REMOTE]

   show or change the current narrowspec

   options ([+] can be repeated):

    --addinclude VALUE [+]       new paths to include
    --removeinclude VALUE [+]    old paths to no longer include
    --addexclude VALUE [+]       new paths to exclude
    --import-rules VALUE         import narrowspecs from a file
    --removeexclude VALUE [+]    old paths to no longer exclude
    --clear                      whether to replace the existing narrowspec
    --force-delete-local-changes forces deletion of local changes when
                                 narrowing
+   --update-working-copy        update working copy when the store has
+                                changed
    -e --ssh CMD                 specify ssh command to use
    --remotecmd CMD              specify hg command to run on the remote side
    --insecure                   do not verify server certificate (ignoring
                                 web.cacerts config)

   (use 'hg tracked -h' to show more help)
   [255]
   $ hg tracked --import-rules doesnotexist
   abort: cannot read narrowspecs from '$TESTTMP/narrow/doesnotexist': $ENOENT$
   [255]

   $ cat > specs <<EOF
   > %include foo
   > [include]
   > path:widest/
   > [exclude]
   > path:inside/
   > EOF

   $ hg tracked --import-rules specs
   abort: including other spec files using '%include' is not supported in narrowspec
   [255]

   $ cat > specs <<EOF
   > [include]
   > outisde
   > [exclude]
   > inside
   > EOF

   $ hg tracked --import-rules specs
   comparing with ssh://user@dummy/master
   searching for changes
   looking for local changes to affected paths
   deleting data/inside/f.i
   deleting meta/inside/00manifest.i (tree !)
   no changes found
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
   adding file changes
   added 2 changesets with 0 changes to 0 files
   new changesets *:* (glob)
   $ hg tracked
   I path:outisde
   X path:inside

 Testing the --import-rules flag with --addinclude and --addexclude

   $ cat > specs <<EOF
   > [include]
   > widest
   > EOF

   $ hg tracked --import-rules specs --addinclude 'wider/'
   comparing with ssh://user@dummy/master
   searching for changes
   no changes found
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
   adding file changes
   added 3 changesets with 1 changes to 1 files
   new changesets *:* (glob)
   $ hg tracked
   I path:outisde
   I path:wider
   I path:widest
   X path:inside

   $ cat > specs <<EOF
   > [exclude]
   > outside2
   > EOF

   $ hg tracked --import-rules specs --addexclude 'widest'
   comparing with ssh://user@dummy/master
   searching for changes
   looking for local changes to affected paths
   deleting data/widest/f.i
   deleting meta/widest/00manifest.i (tree !)
   $ hg tracked
   I path:outisde
   I path:wider
   X path:inside
   X path:outside2
   X path:widest

   $ hg tracked --import-rules specs --clear
   abort: the --clear option is not yet supported
   [255]

 Testing with passing an out-of-wdir file

   $ cat > ../nspecs <<EOF
   > [include]
   > widest
   > EOF

   $ hg tracked --import-rules ../nspecs
   comparing with ssh://user@dummy/master
   searching for changes
   no changes found
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
   adding file changes
   added 3 changesets with 0 changes to 0 files
   new changesets *:* (glob)