##// END OF EJS Templates
hg: write narrow patterns after repo creation...
Gregory Szorc -
r39591:cb675e95 default
parent child Browse files
Show More
@@ -1,423 +1,410 b''
1 1 # narrowcommands.py - command modifications for narrowhg extension
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import itertools
10 10 import os
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial import (
14 14 cmdutil,
15 15 commands,
16 16 discovery,
17 17 encoding,
18 18 error,
19 19 exchange,
20 20 extensions,
21 21 hg,
22 22 merge,
23 23 narrowspec,
24 24 node,
25 25 pycompat,
26 26 registrar,
27 27 repair,
28 28 repository,
29 29 repoview,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from . import (
35 35 narrowwirepeer,
36 36 )
37 37
# Command table for this extension; populated by the @command decorator below.
table = {}
command = registrar.command(table)
40 40
def setup():
    """Wraps user-facing mercurial commands with narrow-aware versions."""

    # entry is the wrapped command tuple; entry[1] is its option list, so
    # appending (shortname, longname, default, help) tuples adds new flags.
    entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
    entry[1].append(('', 'narrow', None,
                     _("create a narrow clone of select files")))
    entry[1].append(('', 'depth', '',
                     _("limit the history fetched by distance from heads")))
    entry[1].append(('', 'narrowspec', '',
                     _("read narrowspecs from file")))
    # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
    # Avoid clashing with the flags the sparse extension already registers.
    if 'sparse' not in extensions.enabled():
        entry[1].append(('', 'include', [],
                         _("specifically fetch this file/directory")))
        entry[1].append(
            ('', 'exclude', [],
             _("do not fetch this file/directory, even if included")))

    entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
    entry[1].append(('', 'depth', '',
                     _("limit the history fetched by distance from heads")))

    extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
64 64
def clonenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps clone command, so 'hg clone' first wraps localrepo.clone().

    Handles the extension's --narrow/--narrowspec/--depth clone flags:
    an optional narrowspec file is parsed into include/exclude patterns,
    and a depth limit is injected into the pull's bundle2 arguments.
    """
    opts = pycompat.byteskwargs(opts)
    # Default to a no-op context manager; replaced below for narrow clones.
    wrappedextraprepare = util.nullcontextmanager()
    narrowspecfile = opts['narrowspec']

    if narrowspecfile:
        filepath = os.path.join(pycompat.getcwd(), narrowspecfile)
        ui.status(_("reading narrowspec from '%s'\n") % filepath)
        try:
            fdata = util.readfile(filepath)
        except IOError as inst:
            raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
                              (filepath, encoding.strtolocal(inst.strerror)))

        includes, excludes, profiles = sparse.parseconfig(ui, fdata, 'narrow')
        # '%include' (profiles) is a sparse-config feature not supported here.
        if profiles:
            raise error.Abort(_("cannot specify other files using '%include' in"
                                " narrowspec"))

        narrowspec.validatepatterns(includes)
        narrowspec.validatepatterns(excludes)

        # narrowspec is passed so we should assume that user wants narrow clone
        opts['narrow'] = True
        opts['include'].extend(includes)
        opts['exclude'].extend(excludes)

    if opts['narrow']:
        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            orig(pullop, kwargs)

            if opts.get('depth'):
                kwargs['depth'] = opts['depth']
        wrappedextraprepare = extensions.wrappedfunction(exchange,
            '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

    with wrappedextraprepare:
        return orig(ui, repo, *args, **pycompat.strkwargs(opts))
117 104
def pullnarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps pull command to allow modifying narrow spec.

    For narrow repositories, forwards the --depth option into the
    bundle2 pull arguments; otherwise the pull runs unmodified.
    """
    wrappedextraprepare = util.nullcontextmanager()
    if repository.NARROW_REQUIREMENT in repo.requirements:

        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            orig(pullop, kwargs)
            if opts.get(r'depth'):
                kwargs['depth'] = opts[r'depth']
        wrappedextraprepare = extensions.wrappedfunction(exchange,
            '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

    with wrappedextraprepare:
        return orig(ui, repo, *args, **opts)
132 119
def archivenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps archive command to narrow the default includes.

    Restricts the user's --include/--exclude patterns to the repository's
    narrowspec so the archive never contains files outside the narrow clone.
    """
    if repository.NARROW_REQUIREMENT in repo.requirements:
        repo_includes, repo_excludes = repo.narrowpats
        includes = set(opts.get(r'include', []))
        excludes = set(opts.get(r'exclude', []))
        includes, excludes, unused_invalid = narrowspec.restrictpatterns(
            includes, excludes, repo_includes, repo_excludes)
        if includes:
            opts[r'include'] = includes
        if excludes:
            opts[r'exclude'] = excludes
    return orig(ui, repo, *args, **opts)
146 133
def pullbundle2extraprepare(orig, pullop, kwargs):
    """Add narrow-clone arguments to the bundle2 pull request.

    For narrow repositories, sends the current include/exclude patterns
    (as both old and new values) so the server can compute the right
    changegroup; aborts if the server lacks narrow support.
    """
    repo = pullop.repo
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return orig(pullop, kwargs)

    if narrowwirepeer.NARROWCAP not in pullop.remote.capabilities():
        raise error.Abort(_("server doesn't support narrow clones"))
    orig(pullop, kwargs)
    kwargs['narrow'] = True
    include, exclude = repo.narrowpats
    # old and new patterns are identical here: a plain pull does not
    # widen or narrow the spec.
    kwargs['oldincludepats'] = include
    kwargs['oldexcludepats'] = exclude
    kwargs['includepats'] = include
    kwargs['excludepats'] = exclude
    # calculate known nodes only in ellipses cases because in non-ellipses cases
    # we have all the nodes
    if narrowwirepeer.ELLIPSESCAP in pullop.remote.capabilities():
        kwargs['known'] = [node.hex(ctx.node()) for ctx in
                           repo.set('::%ln', pullop.common)
                           if ctx.node() != node.nullid]
        if not kwargs['known']:
            # Mercurial serializes an empty list as '' and deserializes it as
            # [''], so delete it instead to avoid handling the empty string on
            # the server.
            del kwargs['known']

extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
                        pullbundle2extraprepare)
175 162
def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
            newincludes, newexcludes, force):
    """Shrink the repository to the new narrowspec.

    Strips local-only commits touching newly-excluded files (refusing
    unless ``force``), deletes the excluded store files and working-copy
    files, and records the new include/exclude patterns.
    """
    oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # This is essentially doing "hg outgoing" to find all local-only
    # commits. We will then check that the local-only commits don't
    # have any changes to files that will be untracked.
    unfi = repo.unfiltered()
    outgoing = discovery.findcommonoutgoing(unfi, remote,
                                            commoninc=commoninc)
    ui.status(_('looking for local changes to affected paths\n'))
    localnodes = []
    for n in itertools.chain(outgoing.missing, outgoing.excluded):
        # a commit is affected if it changes a file that was tracked
        # under the old spec but will not be under the new one
        if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
            localnodes.append(n)
    revstostrip = unfi.revs('descendants(%ln)', localnodes)
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    visibletostrip = list(repo.changelog.node(r)
                          for r in (revstostrip - hiddenrevs))
    if visibletostrip:
        ui.status(_('The following changeset(s) or their ancestors have '
                    'local changes not on the remote:\n'))
        maxnodes = 10
        if ui.verbose or len(visibletostrip) <= maxnodes:
            for n in visibletostrip:
                ui.status('%s\n' % node.short(n))
        else:
            # truncate the listing unless --verbose was given
            for n in visibletostrip[:maxnodes]:
                ui.status('%s\n' % node.short(n))
            ui.status(_('...and %d more, use --verbose to list all\n') %
                      (len(visibletostrip) - maxnodes))
        if not force:
            raise error.Abort(_('local changes found'),
                              hint=_('use --force-delete-local-changes to '
                                     'ignore'))

    with ui.uninterruptable():
        if revstostrip:
            tostrip = [unfi.changelog.node(r) for r in revstostrip]
            if repo['.'].node() in tostrip:
                # stripping working copy, so move to a different commit first
                urev = max(repo.revs('(::%n) - %ln + null',
                                     repo['.'].node(), visibletostrip))
                hg.clean(repo, urev)
            repair.strip(ui, unfi, tostrip, topic='narrow')

        todelete = []
        for f, f2, size in repo.store.datafiles():
            if f.startswith('data/'):
                # strip the 'data/' prefix and the 2-char revlog suffix to
                # recover the tracked file path
                file = f[5:-2]
                if not newmatch(file):
                    todelete.append(f)
            elif f.startswith('meta/'):
                dir = f[5:-13]
                dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
                include = True
                for d in dirs:
                    visit = newmatch.visitdir(d)
                    if not visit:
                        include = False
                        break
                    if visit == 'all':
                        break
                if not include:
                    todelete.append(f)

        repo.destroying()

        with repo.transaction("narrowing"):
            for f in todelete:
                ui.status(_('deleting %s\n') % f)
                util.unlinkpath(repo.svfs.join(f))
                repo.store.markremoved(f)

            # drop newly-excluded files from the dirstate and working copy
            for f in repo.dirstate:
                if not newmatch(f):
                    repo.dirstate.drop(f)
                    repo.wvfs.unlinkpath(f)
            repo.setnarrowpats(newincludes, newexcludes)

        repo.destroyed()
258 245
def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
    """Expand the repository to the new (wider) narrowspec.

    Pulls the additional data from ``remote`` with widen-specific bundle2
    arguments, records the new patterns, then materializes newly-included
    files in the working copy via a merge-style update.
    """
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    def pullbundle2extraprepare_widen(orig, pullop, kwargs):
        orig(pullop, kwargs)
        # The old{in,ex}cludepats have already been set by orig()
        kwargs['includepats'] = newincludes
        kwargs['excludepats'] = newexcludes
        kwargs['widen'] = True
    wrappedextraprepare = extensions.wrappedfunction(exchange,
        '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

    # define a function that narrowbundle2 can call after creating the
    # backup bundle, but before applying the bundle from the server
    def setnewnarrowpats():
        repo.setnarrowpats(newincludes, newexcludes)
    repo.setnewnarrowpats = setnewnarrowpats

    with ui.uninterruptable():
        ds = repo.dirstate
        p1, p2 = ds.p1(), ds.p2()
        # temporarily detach the working copy parents so the pull does not
        # see them; restored right after the pull
        with ds.parentchange():
            ds.setparents(node.nullid, node.nullid)
        common = commoninc[0]
        with wrappedextraprepare:
            exchange.pull(repo, remote, heads=common)
        with ds.parentchange():
            ds.setparents(p1, p2)

        repo.setnewnarrowpats()
        # build an empty merge action dict; only 'g' (get) actions are added
        actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
        addgaction = actions['g'].append

        mf = repo['.'].manifest().matches(newmatch)
        for f, fn in mf.iteritems():
            if f not in repo.dirstate:
                addgaction((f, (mf.flags(f), False),
                            "add from widened narrow clone"))

        merge.applyupdates(repo, actions, wctx=repo[None],
                           mctx=repo['.'], overwrite=False)
        merge.recordupdates(repo, actions, branchmerge=False)
301 288
# TODO(rdamazio): Make new matcher format and update description
@command('tracked',
    [('', 'addinclude', [], _('new paths to include')),
     ('', 'removeinclude', [], _('old paths to no longer include')),
     ('', 'addexclude', [], _('new paths to exclude')),
     ('', 'import-rules', '', _('import narrowspecs from a file')),
     ('', 'removeexclude', [], _('old paths to no longer exclude')),
     ('', 'clear', False, _('whether to replace the existing narrowspec')),
     ('', 'force-delete-local-changes', False,
       _('forces deletion of local changes when narrowing')),
    ] + commands.remoteopts,
    _('[OPTIONS]... [REMOTE]'),
    inferrepo=True)
def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
    """show or change the current narrowspec

    With no argument, shows the current narrowspec entries, one per line. Each
    line will be prefixed with 'I' or 'X' for included or excluded patterns,
    respectively.

    The narrowspec is comprised of expressions to match remote files and/or
    directories that should be pulled into your client.
    The narrowspec has *include* and *exclude* expressions, with excludes always
    trumping includes: that is, if a file matches an exclude expression, it will
    be excluded even if it also matches an include expression.
    Excluding files that were never included has no effect.

    Each included or excluded entry is in the format described by
    'hg help patterns'.

    The options allow you to add or remove included and excluded expressions.

    If --clear is specified, then all previous includes and excludes are DROPPED
    and replaced by the new ones specified to --addinclude and --addexclude.
    If --clear is specified without any further options, the narrowspec will be
    empty and will not match any files.
    """
    opts = pycompat.byteskwargs(opts)
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        # fix: "respositories" -> "repositories" in the user-facing warning
        ui.warn(_('The narrow command is only supported on repositories cloned'
                  ' with --narrow.\n'))
        return 1

    # Before supporting, decide whether it "hg tracked --clear" should mean
    # tracking no paths or all paths.
    if opts['clear']:
        ui.warn(_('The --clear option is not yet supported.\n'))
        return 1

    # import rules from a file
    newrules = opts.get('import_rules')
    if newrules:
        try:
            filepath = os.path.join(pycompat.getcwd(), newrules)
            fdata = util.readfile(filepath)
        except IOError as inst:
            raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
                              (filepath, encoding.strtolocal(inst.strerror)))
        includepats, excludepats, profiles = sparse.parseconfig(ui, fdata,
                                                                'narrow')
        if profiles:
            raise error.Abort(_("including other spec files using '%include' "
                                "is not supported in narrowspec"))
        opts['addinclude'].extend(includepats)
        opts['addexclude'].extend(excludepats)

    addedincludes = narrowspec.parsepatterns(opts['addinclude'])
    removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
    addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
    removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
    widening = addedincludes or removedexcludes
    narrowing = removedincludes or addedexcludes
    only_show = not widening and not narrowing

    # Only print the current narrowspec.
    if only_show:
        include, exclude = repo.narrowpats

        ui.pager('tracked')
        fm = ui.formatter('narrow', opts)
        for i in sorted(include):
            fm.startitem()
            fm.write('status', '%s ', 'I', label='narrow.included')
            fm.write('pat', '%s\n', i, label='narrow.included')
        for i in sorted(exclude):
            fm.startitem()
            fm.write('status', '%s ', 'X', label='narrow.excluded')
            fm.write('pat', '%s\n', i, label='narrow.excluded')
        fm.end()
        return 0

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)

        # Find the revisions we have in common with the remote. These will
        # be used for finding local-only changes for narrowing. They will
        # also define the set of revisions to update for widening.
        remotepath = ui.expandpath(remotepath or 'default')
        url, branches = hg.parseurl(remotepath)
        ui.status(_('comparing with %s\n') % util.hidepassword(url))
        remote = hg.peer(repo, opts, url)
        commoninc = discovery.findcommonincoming(repo, remote)

        oldincludes, oldexcludes = repo.narrowpats
        if narrowing:
            newincludes = oldincludes - removedincludes
            newexcludes = oldexcludes | addedexcludes
            _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                    newincludes, newexcludes,
                    opts['force_delete_local_changes'])
            # _narrow() updated the narrowspec and _widen() below needs to
            # use the updated values as its base (otherwise removed includes
            # and addedexcludes will be lost in the resulting narrowspec)
            oldincludes = newincludes
            oldexcludes = newexcludes

        if widening:
            newincludes = oldincludes | addedincludes
            newexcludes = oldexcludes - removedexcludes
            _widen(ui, repo, remote, commoninc, newincludes, newexcludes)

    return 0
@@ -1,1227 +1,1231 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15 import stat
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 nullid,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 bundlerepo,
25 25 cacheutil,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 httppeer,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 logexchange,
37 37 merge as mergemod,
38 38 narrowspec,
39 39 node,
40 40 phases,
41 41 scmutil,
42 42 sshpeer,
43 43 statichttprepo,
44 44 ui as uimod,
45 45 unionrepo,
46 46 url,
47 47 util,
48 48 verify as verifymod,
49 49 vfs as vfsmod,
50 50 )
51 51
52 52 from .utils import (
53 53 stringutil,
54 54 )
55 55
# convenience alias used throughout this module for releasing locks
release = lock.release

# shared features
sharedbookmarks = 'bookmarks'
60 60
def _local(path):
    """Return the repo module for *path*: bundlerepo when the path is a
    plain file (a bundle), localrepo otherwise."""
    expanded = util.expandpath(util.urllocalpath(path))
    if os.path.isfile(expanded):
        return bundlerepo
    return localrepo
64 64
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against ``other`` and return (revs, checkout).

    ``branches`` is a (hashbranch, branches) pair as produced by parseurl().
    Branch names are expanded to their head node hashes via the peer's
    branchmap; unresolved names fall back to being treated as revisions.
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve; pass through the requested revs
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        # returns True if the branch was resolved (heads appended to revs)
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a known branch; assume it names a revision
            revs.append(hashbranch)
    return revs, revs[0]
107 107
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    parsed = util.url(path)
    fragment = None
    if parsed.fragment:
        fragment = parsed.fragment
        parsed.fragment = None
    return bytes(parsed), (fragment, branches or [])
117 117
# Map of URL scheme -> module (or callable) providing repo/peer classes;
# consumed by _peerlookup() below.
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
127 127
def _peerlookup(path):
    """Return the scheme handler (module or instance factory) for ``path``.

    Unknown schemes fall back to the 'file' handler.
    """
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing
140 140
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    # a path: ask the scheme handler whether it is local
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
149 149
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path)
    return util.posixfile(pathurl.localpath(), 'rb')
157 157
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path

    Instantiates the object via the scheme handler, then runs presetup
    functions and every extension's reposetup hook (timed when
    devel.debug.extensions is set); wire peers additionally get the
    wirepeersetupfuncs applied.
    """
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    # the created object may carry its own (repo-level) ui; prefer it
    ui = getattr(obj, "ui", ui)
    if ui.configbool('devel', 'debug.extensions'):
        log = lambda msg, *values: ui.debug('debug.extensions: ',
            msg % values, label='debug.extensions')
    else:
        log = lambda *a, **kw: None
    for f in presetupfuncs or []:
        f(ui, obj)
    log('- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            log('  - running reposetup for %s\n' % (name,))
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                log('  > reposetup for %r took %s\n', name, stats)
    log('> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
188 188
def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path

    Aborts if the path resolves to a remote peer rather than a local
    repository; the returned repo is filtered to 'visible'.
    """
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')
199 199
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # remoteui() derives a ui configured for remote interaction from either
    # a ui or a repo plus command-line opts
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()
205 205
def defaultdest(source):
    '''return default destination of clone if none is given

    The destination is the last path component of the source URL;
    empty when the URL has no usable path.

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))
226 226
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # cached on the repo object by a previous call
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
244 244
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    ``source`` may be a path (bytes) or a repo object. The new repository
    at ``dest`` stores only a 'requires' and 'sharedpath' file pointing at
    the source's store; with ``relative`` the pointer is a relative path.
    Returns the new repo object.
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.makedirs()
    destvfs.makedir()

    # inherit the source's requirements, then add the share flavor below
    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
\
    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=stringutil.forcebytestr(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r
305 305
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        # keep the old pointer around for debugging/recovery
        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()
341 341
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        # point the new repo's default path at the source's default
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

    with destrepo.wlock():
        if bookmarks:
            # mark bookmarks as shared in .hg/shared
            destrepo.vfs.write('shared', sharedbookmarks + '\n')
360 360
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to 'default'/'tip'
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
381 381
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        # hardlink is tri-state: None until the first copyfiles() call
        # determines whether hardlinking works, then True/False
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos don't need phase data copied over
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise
419 419
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

        # Resolve the value to put in [paths] section for the source.
        if islocal(source):
            defaultpath = os.path.abspath(util.urllocalpath(source))
        else:
            defaultpath = source

        sharerepo = repository(ui, path=sharepath)
        share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
              defaultpath=defaultpath)

        # We need to perform a pull against the dest repo to fetch bookmarks
        # and other non-store data that isn't shared by default. In the case of
        # non-existing shared repo, this means we pull from the remote twice. This
        # is a bit weird. But at the time it was implemented, there wasn't an easy
        # way to pull just non-changegroup data.
        destrepo = repository(ui, path=dest)
        exchange.pull(destrepo, srcpeer, heads=revs)

        _postshareupdate(destrepo, update)

        return srcpeer, peer(ui, peeropts, dest)
491 491
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srccachefile = srcrepo.vfs.join('cache/%s' % fname)
    if not os.path.exists(srccachefile):
        # Nothing to copy; leave the destination cache dir untouched.
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srccachefile, os.path.join(dstcachedir, fname))
502 502
def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None,
          storeincludepats=None, storeexcludepats=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    # Normalize the source into (URL, branches, peer) regardless of whether
    # we were handed a URL/path or an existing local repository object.
    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    # Validate narrow patterns and decide whether this is a narrow clone;
    # createopts is forwarded to repo creation below.
    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {'path:.'}

        createopts['narrowfiles'] = True

    # Pooled-storage mode: clone into a shared store and "hg share" from it.
    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_('(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    # cleandir, when set, is removed in the finally block to undo a partial
    # clone after a failure.
    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        # Decide between a hardlink/file copy of the store and a pull.
        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                # Record the narrow patterns before pulling so the pull is
                # performed against the configured narrow spec.
                if narrow:
                    with local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        # None lets the server decide about streaming.
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        # Clone succeeded; do not delete the destination in the finally block.
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # Fall back to the '@' bookmark, then the default branch
                    # tip, then tip.
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
835 839
def _showstats(repo, stats, quietempty=False):
    """Print the standard update summary line for *stats*.

    With quietempty, a no-op result prints nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (stats.updatedcount, stats.mergedcount,
              stats.removedcount, stats.unresolvedcount)
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % counts)
843 847
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    Changes are clobbered when overwrite is set, merged otherwise.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    mergelabels = ['working copy', 'destination']
    return mergemod.update(repo, node, False, overwrite,
                           labels=mergelabels, updatecheck=updatecheck)
853 857
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    # Truthy (non-zero) means conflicts remain.
    return unresolved > 0

# naming conflict in clone()
_update = update
864 868
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # A clean update invalidates any interrupted graft state.
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return bool(stats.unresolvedcount)

# naming conflict in updatetotally()
_clean = clean
875 879
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
      changes into destination
    * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # No explicit destination: let destutil pick one (and possibly
            # a bookmark to move and a bookmark name to activate).
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                # Abort now on a dirty working directory; after the check,
                # the regular update needs no further dirtiness handling.
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        # Bookmark bookkeeping: advance the active bookmark on a clean
        # update, or (de)activate one according to brev.
        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # Updated to a non-bookmark name: leave the active bookmark.
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
947 951
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts.

    With abort=True, undo an in-progress merge by updating back to the
    first parent (discarding the merge) instead of merging.
    """
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        # Forced non-merge update back to the pre-merge revision.
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
976 980
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changesets were found, otherwise the result of
    subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # getremotechanges may replace 'other' with a (temporary) bundle repo;
    # cleanupfn below removes that temporary state.
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
1007 1011
def incoming(ui, repo, source, opts):
    """Show changesets in *source* not present locally; 0 if any found."""
    def subreporecurse():
        # Recurse into subrepos when requested, keeping the best (lowest)
        # exit code seen.
        worst = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                sub = wctx.sub(subpath)
                worst = min(worst, sub.incoming(ui, source, opts))
        return worst

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        skipmerges = opts.get('no_merges')
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
1032 1036
def _outgoing(ui, repo, dest, opts):
    """Resolve *dest* and return (missing changesets, remote peer)."""
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
1053 1057
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in *dest*; 0 if any exist."""
    def recurse():
        # Recurse into subrepos when requested, keeping the best (lowest)
        # exit code seen.
        worst = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                sub = wctx.sub(subpath)
                worst = min(worst, sub.outgoing(ui, dest, opts))
        return worst

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    skipmerges = opts.get('no_merges')
    shown = 0
    for n in o:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
1087 1091
def verify(repo):
    """verify the consistency of a repository

    Also verifies any subrepos referenced from tracked .hgsubstate files.
    Returns a non-zero value when problems were found.
    """
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            # Deliberately broad: a corrupt .hgsubstate can fail in many
            # ways; report it and keep verifying other revisions.
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
1116 1120
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, 'baseui'):
        # src looks like a repository: start from the global config and
        # read per-target options from the repo-level ui.
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui
    else:
        # assume src is a global ui object; keep all global options
        dst = src.copy()

    # ssh-specific options; command-line opts win over config
    for key in ('ssh', 'remotecmd'):
        value = opts.get(key) or src.config('ui', key)
        if value:
            dst.setconfig("ui", key, value, 'copied')

    # bundle-specific options
    mainroot = src.config('bundle', 'mainreporoot')
    if mainroot:
        dst.setconfig('bundle', 'mainreporoot', mainroot, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
1145 1149
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding a base directory,
# file name relative to it); consumed by cachedlocalrepo._repostate below.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]
1154 1158
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # _state is a tuple of (mtime, size) pairs for the files of
        # interest; mtime is the newest of those mtimes.
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (tuple of (mtime, size) pairs, newest mtime) for the
        files of interest (module-level ``foi``)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # File may not exist (e.g. no bookmarks); fall back to the
                # containing directory so state still has an entry.
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now